mpegvideo.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
30 #include "libavutil/imgutils.h"
31 #include "avcodec.h"
32 #include "dsputil.h"
33 #include "internal.h"
34 #include "mathops.h"
35 #include "mpegvideo.h"
36 #include "mjpegenc.h"
37 #include "msmpeg4.h"
38 #include "xvmc_internal.h"
39 #include "thread.h"
40 #include <limits.h>
41 
42 //#undef NDEBUG
43 //#include <assert.h>
44 
46  DCTELEM *block, int n, int qscale);
48  DCTELEM *block, int n, int qscale);
50  DCTELEM *block, int n, int qscale);
52  DCTELEM *block, int n, int qscale);
54  DCTELEM *block, int n, int qscale);
56  DCTELEM *block, int n, int qscale);
58  DCTELEM *block, int n, int qscale);
59 
60 
61 /* enable all paranoid tests for rounding, overflows, etc... */
62 //#define PARANOID
63 
64 //#define DEBUG
65 
66 
68 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
69  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
70  16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
71 };
72 
74 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
75  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
82  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
83 };
84 
85 static const uint8_t mpeg2_dc_scale_table1[128] = {
86 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
87  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
94  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
95 };
96 
/* MPEG-2 DC scale table for intra_dc_precision == 2: every quantizer
 * scale maps to a DC divisor of 2. Indexed by qscale (0..127). */
static const uint8_t mpeg2_dc_scale_table2[128] = {
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};
108 
/* MPEG-2 DC scale table for intra_dc_precision == 3: every quantizer
 * scale maps to a DC divisor of 1 (full precision). Indexed by qscale. */
static const uint8_t mpeg2_dc_scale_table3[128] = {
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};
120 
121 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
126 };
127 
131 };
132 
139 };
140 
142  const uint8_t *end,
143  uint32_t * restrict state)
144 {
145  int i;
146 
147  assert(p <= end);
148  if (p >= end)
149  return end;
150 
151  for (i = 0; i < 3; i++) {
152  uint32_t tmp = *state << 8;
153  *state = tmp + *(p++);
154  if (tmp == 0x100 || p == end)
155  return p;
156  }
157 
158  while (p < end) {
159  if (p[-1] > 1 ) p += 3;
160  else if (p[-2] ) p += 2;
161  else if (p[-3]|(p[-1]-1)) p++;
162  else {
163  p++;
164  break;
165  }
166  }
167 
168  p = FFMIN(p, end) - 4;
169  *state = AV_RB32(p);
170 
171  return p + 4;
172 }
173 
174 /* init common dct for both encoder and decoder */
176 {
177  ff_dsputil_init(&s->dsp, s->avctx);
179 
185  if (s->flags & CODEC_FLAG_BITEXACT)
188 
189 #if ARCH_X86
191 #elif ARCH_ALPHA
193 #elif ARCH_ARM
195 #elif HAVE_ALTIVEC
197 #elif ARCH_BFIN
199 #endif
200 
201  /* load & permutate scantables
202  * note: only wmv uses different ones
203  */
204  if (s->alternate_scan) {
207  } else {
210  }
213 
214  return 0;
215 }
216 
218 {
219  *dst = *src;
220  dst->f.type = FF_BUFFER_TYPE_COPY;
221 }
222 
227 {
228  /* WM Image / Screen codecs allocate internal buffers with different
229  * dimensions / colorspaces; ignore user-defined callbacks for these. */
230  if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
233  ff_thread_release_buffer(s->avctx, &pic->f);
234  else
237 }
238 
240 {
241  int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
242 
243  // edge emu needs blocksize + filter length - 1
244  // (= 17x17 for halfpel / 21x21 for h264)
245  // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
246  // at uvlinesize. It supports only YUV420 so 24x24 is enough
247  // linesize * interlaced * MBsize
248  FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 2 * 24,
249  fail);
250 
251  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 3,
252  fail)
253  s->me.temp = s->me.scratchpad;
254  s->rd_scratchpad = s->me.scratchpad;
255  s->b_scratchpad = s->me.scratchpad;
256  s->obmc_scratchpad = s->me.scratchpad + 16;
257 
258  return 0;
259 fail:
261  return AVERROR(ENOMEM);
262 }
263 
268 {
269  int r, ret;
270 
271  if (s->avctx->hwaccel) {
272  assert(!pic->f.hwaccel_picture_private);
273  if (s->avctx->hwaccel->priv_data_size) {
275  if (!pic->f.hwaccel_picture_private) {
276  av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
277  return -1;
278  }
279  }
280  }
281 
282  if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
285  r = ff_thread_get_buffer(s->avctx, &pic->f);
286  else
287  r = avcodec_default_get_buffer(s->avctx, &pic->f);
288 
289  if (r < 0 || !pic->f.type || !pic->f.data[0]) {
290  av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
291  r, pic->f.type, pic->f.data[0]);
293  return -1;
294  }
295 
296  if (s->linesize && (s->linesize != pic->f.linesize[0] ||
297  s->uvlinesize != pic->f.linesize[1])) {
299  "get_buffer() failed (stride changed)\n");
300  free_frame_buffer(s, pic);
301  return -1;
302  }
303 
304  if (pic->f.linesize[1] != pic->f.linesize[2]) {
306  "get_buffer() failed (uv stride mismatch)\n");
307  free_frame_buffer(s, pic);
308  return -1;
309  }
310 
311  if (!s->edge_emu_buffer &&
312  (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
314  "get_buffer() failed to allocate context scratch buffers.\n");
315  free_frame_buffer(s, pic);
316  return ret;
317  }
318 
319  return 0;
320 }
321 
326 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
327 {
328  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
329 
330  // the + 1 is needed so memset(,,stride*height) does not sig11
331 
332  const int mb_array_size = s->mb_stride * s->mb_height;
333  const int b8_array_size = s->b8_stride * s->mb_height * 2;
334  const int b4_array_size = s->b4_stride * s->mb_height * 4;
335  int i;
336  int r = -1;
337 
338  if (shared) {
339  assert(pic->f.data[0]);
340  assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
342  } else {
343  assert(!pic->f.data[0]);
344 
345  if (alloc_frame_buffer(s, pic) < 0)
346  return -1;
347 
348  s->linesize = pic->f.linesize[0];
349  s->uvlinesize = pic->f.linesize[1];
350  }
351 
352  if (pic->f.qscale_table == NULL) {
353  if (s->encoding) {
354  FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
355  mb_array_size * sizeof(int16_t), fail)
357  mb_array_size * sizeof(int16_t), fail)
359  mb_array_size * sizeof(int8_t ), fail)
360  }
361 
363  mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
365  (big_mb_num + s->mb_stride) * sizeof(uint8_t),
366  fail)
368  (big_mb_num + s->mb_stride) * sizeof(uint32_t),
369  fail)
370  pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
371  pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
372  if (s->out_format == FMT_H264) {
373  for (i = 0; i < 2; i++) {
375  2 * (b4_array_size + 4) * sizeof(int16_t),
376  fail)
377  pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
378  FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
379  4 * mb_array_size * sizeof(uint8_t), fail)
380  }
381  pic->f.motion_subsample_log2 = 2;
382  } else if (s->out_format == FMT_H263 || s->encoding ||
383  (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
384  for (i = 0; i < 2; i++) {
386  2 * (b8_array_size + 4) * sizeof(int16_t),
387  fail)
388  pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
389  FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
390  4 * mb_array_size * sizeof(uint8_t), fail)
391  }
392  pic->f.motion_subsample_log2 = 3;
393  }
394  if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
396  64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
397  }
398  pic->f.qstride = s->mb_stride;
400  1 * sizeof(AVPanScan), fail)
401  }
402 
403  pic->owner2 = s;
404 
405  return 0;
406 fail: // for the FF_ALLOCZ_OR_GOTO macro
407  if (r >= 0)
408  free_frame_buffer(s, pic);
409  return -1;
410 }
411 
415 static void free_picture(MpegEncContext *s, Picture *pic)
416 {
417  int i;
418 
419  if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
420  free_frame_buffer(s, pic);
421  }
422 
423  av_freep(&pic->mb_var);
424  av_freep(&pic->mc_mb_var);
425  av_freep(&pic->mb_mean);
426  av_freep(&pic->f.mbskip_table);
428  pic->f.qscale_table = NULL;
429  av_freep(&pic->mb_type_base);
430  pic->f.mb_type = NULL;
431  av_freep(&pic->f.dct_coeff);
432  av_freep(&pic->f.pan_scan);
433  pic->f.mb_type = NULL;
434  for (i = 0; i < 2; i++) {
435  av_freep(&pic->motion_val_base[i]);
436  av_freep(&pic->f.ref_index[i]);
437  pic->f.motion_val[i] = NULL;
438  }
439 
440  if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
441  for (i = 0; i < 4; i++) {
442  pic->f.base[i] =
443  pic->f.data[i] = NULL;
444  }
445  pic->f.type = 0;
446  }
447 }
448 
450 {
451  int y_size = s->b8_stride * (2 * s->mb_height + 1);
452  int c_size = s->mb_stride * (s->mb_height + 1);
453  int yc_size = y_size + 2 * c_size;
454  int i;
455 
456  s->edge_emu_buffer =
457  s->me.scratchpad =
458  s->me.temp =
459  s->rd_scratchpad =
460  s->b_scratchpad =
461  s->obmc_scratchpad = NULL;
462 
463  if (s->encoding) {
464  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
465  ME_MAP_SIZE * sizeof(uint32_t), fail)
467  ME_MAP_SIZE * sizeof(uint32_t), fail)
468  if (s->avctx->noise_reduction) {
470  2 * 64 * sizeof(int), fail)
471  }
472  }
473  FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(DCTELEM), fail)
474  s->block = s->blocks[0];
475 
476  for (i = 0; i < 12; i++) {
477  s->pblocks[i] = &s->block[i];
478  }
479 
480  if (s->out_format == FMT_H263) {
481  /* ac values */
483  yc_size * sizeof(int16_t) * 16, fail);
484  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
485  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
486  s->ac_val[2] = s->ac_val[1] + c_size;
487  }
488 
489  return 0;
490 fail:
491  return -1; // free() through ff_MPV_common_end()
492 }
493 
495 {
496  if (s == NULL)
497  return;
498 
500  av_freep(&s->me.scratchpad);
501  s->me.temp =
502  s->rd_scratchpad =
503  s->b_scratchpad =
504  s->obmc_scratchpad = NULL;
505 
506  av_freep(&s->dct_error_sum);
507  av_freep(&s->me.map);
508  av_freep(&s->me.score_map);
509  av_freep(&s->blocks);
510  av_freep(&s->ac_val_base);
511  s->block = NULL;
512 }
513 
515 {
516 #define COPY(a) bak->a = src->a
517  COPY(edge_emu_buffer);
518  COPY(me.scratchpad);
519  COPY(me.temp);
520  COPY(rd_scratchpad);
521  COPY(b_scratchpad);
522  COPY(obmc_scratchpad);
523  COPY(me.map);
524  COPY(me.score_map);
525  COPY(blocks);
526  COPY(block);
527  COPY(start_mb_y);
528  COPY(end_mb_y);
529  COPY(me.map_generation);
530  COPY(pb);
531  COPY(dct_error_sum);
532  COPY(dct_count[0]);
533  COPY(dct_count[1]);
534  COPY(ac_val_base);
535  COPY(ac_val[0]);
536  COPY(ac_val[1]);
537  COPY(ac_val[2]);
538 #undef COPY
539 }
540 
542 {
543  MpegEncContext bak;
544  int i, ret;
545  // FIXME copy only needed parts
546  // START_TIMER
547  backup_duplicate_context(&bak, dst);
548  memcpy(dst, src, sizeof(MpegEncContext));
549  backup_duplicate_context(dst, &bak);
550  for (i = 0; i < 12; i++) {
551  dst->pblocks[i] = &dst->block[i];
552  }
553  if (!dst->edge_emu_buffer &&
554  (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
555  av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
556  "scratch buffers.\n");
557  return ret;
558  }
559  // STOP_TIMER("update_duplicate_context")
560  // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
561  return 0;
562 }
563 
565  const AVCodecContext *src)
566 {
567  int i;
568  MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
569 
570  if (dst == src || !s1->context_initialized)
571  return 0;
572 
573  // FIXME can parameters change on I-frames?
574  // in that case dst may need a reinit
575  if (!s->context_initialized) {
576  memcpy(s, s1, sizeof(MpegEncContext));
577 
578  s->avctx = dst;
581  s->bitstream_buffer = NULL;
583 
585  }
586 
587  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
588  int err;
589  s->context_reinit = 0;
590  s->height = s1->height;
591  s->width = s1->width;
592  if ((err = ff_MPV_common_frame_size_change(s)) < 0)
593  return err;
594  }
595 
596  s->avctx->coded_height = s1->avctx->coded_height;
597  s->avctx->coded_width = s1->avctx->coded_width;
598  s->avctx->width = s1->avctx->width;
599  s->avctx->height = s1->avctx->height;
600 
601  s->coded_picture_number = s1->coded_picture_number;
602  s->picture_number = s1->picture_number;
603  s->input_picture_number = s1->input_picture_number;
604 
605  memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
606  memcpy(&s->last_picture, &s1->last_picture,
607  (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
608 
609  // reset s->picture[].f.extended_data to s->picture[].f.data
610  for (i = 0; i < s->picture_count; i++)
611  s->picture[i].f.extended_data = s->picture[i].f.data;
612 
613  s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
614  s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
615  s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
616 
617  // Error/bug resilience
618  s->next_p_frame_damaged = s1->next_p_frame_damaged;
619  s->workaround_bugs = s1->workaround_bugs;
620 
621  // MPEG4 timing info
622  memcpy(&s->time_increment_bits, &s1->time_increment_bits,
623  (char *) &s1->shape - (char *) &s1->time_increment_bits);
624 
625  // B-frame info
626  s->max_b_frames = s1->max_b_frames;
627  s->low_delay = s1->low_delay;
628  s->droppable = s1->droppable;
629 
630  // DivX handling (doesn't work)
631  s->divx_packed = s1->divx_packed;
632 
633  if (s1->bitstream_buffer) {
634  if (s1->bitstream_buffer_size +
638  s1->allocated_bitstream_buffer_size);
639  s->bitstream_buffer_size = s1->bitstream_buffer_size;
640  memcpy(s->bitstream_buffer, s1->bitstream_buffer,
641  s1->bitstream_buffer_size);
642  memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
644  }
645 
646  // linesize dependend scratch buffer allocation
647  if (!s->edge_emu_buffer)
648  if (s1->linesize) {
649  if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
650  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
651  "scratch buffers.\n");
652  return AVERROR(ENOMEM);
653  }
654  } else {
655  av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
656  "be allocated due to unknown size.\n");
657  return AVERROR_BUG;
658  }
659 
660  // MPEG2/interlacing info
661  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
662  (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
663 
664  if (!s1->first_field) {
665  s->last_pict_type = s1->pict_type;
666  if (s1->current_picture_ptr)
667  s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
668 
669  if (s1->pict_type != AV_PICTURE_TYPE_B) {
670  s->last_non_b_pict_type = s1->pict_type;
671  }
672  }
673 
674  return 0;
675 }
676 
684 {
685  s->y_dc_scale_table =
688  s->progressive_frame = 1;
689  s->progressive_sequence = 1;
691 
692  s->coded_picture_number = 0;
693  s->picture_number = 0;
694  s->input_picture_number = 0;
695 
696  s->picture_in_gop_number = 0;
697 
698  s->f_code = 1;
699  s->b_code = 1;
700 
701  s->picture_range_start = 0;
703 
704  s->slice_context_count = 1;
705 }
706 
713 {
715 }
716 
721 {
722  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
723 
724  s->mb_width = (s->width + 15) / 16;
725  s->mb_stride = s->mb_width + 1;
726  s->b8_stride = s->mb_width * 2 + 1;
727  s->b4_stride = s->mb_width * 4 + 1;
728  mb_array_size = s->mb_height * s->mb_stride;
729  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
730 
731  /* set default edge pos, will be overriden
732  * in decode_header if needed */
733  s->h_edge_pos = s->mb_width * 16;
734  s->v_edge_pos = s->mb_height * 16;
735 
736  s->mb_num = s->mb_width * s->mb_height;
737 
738  s->block_wrap[0] =
739  s->block_wrap[1] =
740  s->block_wrap[2] =
741  s->block_wrap[3] = s->b8_stride;
742  s->block_wrap[4] =
743  s->block_wrap[5] = s->mb_stride;
744 
745  y_size = s->b8_stride * (2 * s->mb_height + 1);
746  c_size = s->mb_stride * (s->mb_height + 1);
747  yc_size = y_size + 2 * c_size;
748 
749  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
750  fail); // error ressilience code looks cleaner with this
751  for (y = 0; y < s->mb_height; y++)
752  for (x = 0; x < s->mb_width; x++)
753  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
754 
755  s->mb_index2xy[s->mb_height * s->mb_width] =
756  (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
757 
758  if (s->encoding) {
759  /* Allocate MV tables */
761  mv_table_size * 2 * sizeof(int16_t), fail);
763  mv_table_size * 2 * sizeof(int16_t), fail);
765  mv_table_size * 2 * sizeof(int16_t), fail);
767  mv_table_size * 2 * sizeof(int16_t), fail);
769  mv_table_size * 2 * sizeof(int16_t), fail);
771  mv_table_size * 2 * sizeof(int16_t), fail);
772  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
776  s->mb_stride + 1;
778  s->mb_stride + 1;
780 
781  /* Allocate MB type table */
782  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size *
783  sizeof(uint16_t), fail); // needed for encoding
784 
785  FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size *
786  sizeof(int), fail);
787 
789  mb_array_size * sizeof(float), fail);
791  mb_array_size * sizeof(float), fail);
792 
793  }
794 
796  mb_array_size * sizeof(uint8_t), fail);
798  mb_array_size * sizeof(uint8_t), fail);
799 
800  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
802  /* interlaced direct mode decoding tables */
803  for (i = 0; i < 2; i++) {
804  int j, k;
805  for (j = 0; j < 2; j++) {
806  for (k = 0; k < 2; k++) {
808  s->b_field_mv_table_base[i][j][k],
809  mv_table_size * 2 * sizeof(int16_t),
810  fail);
811  s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
812  s->mb_stride + 1;
813  }
815  mb_array_size * 2 * sizeof(uint8_t), fail);
817  mv_table_size * 2 * sizeof(int16_t), fail);
818  s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]
819  + s->mb_stride + 1;
820  }
822  mb_array_size * 2 * sizeof(uint8_t), fail);
823  }
824  }
825  if (s->out_format == FMT_H263) {
826  /* cbp values */
827  FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
828  s->coded_block = s->coded_block_base + s->b8_stride + 1;
829 
830  /* cbp, ac_pred, pred_dir */
832  mb_array_size * sizeof(uint8_t), fail);
834  mb_array_size * sizeof(uint8_t), fail);
835  }
836 
837  if (s->h263_pred || s->h263_plus || !s->encoding) {
838  /* dc values */
839  // MN: we need these for error resilience of intra-frames
841  yc_size * sizeof(int16_t), fail);
842  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
843  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
844  s->dc_val[2] = s->dc_val[1] + c_size;
845  for (i = 0; i < yc_size; i++)
846  s->dc_val_base[i] = 1024;
847  }
848 
849  /* which mb is a intra block */
850  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
851  memset(s->mbintra_table, 1, mb_array_size);
852 
853  /* init macroblock skip table */
854  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
855  // Note the + 1 is for a quicker mpeg4 slice_end detection
856 
858  s->avctx->debug_mv) {
859  s->visualization_buffer[0] = av_malloc((s->mb_width * 16 +
860  2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
861  s->visualization_buffer[1] = av_malloc((s->mb_width * 16 +
862  2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
863  s->visualization_buffer[2] = av_malloc((s->mb_width * 16 +
864  2 * EDGE_WIDTH) * s->mb_height * 16 + 2 * EDGE_WIDTH);
865  }
866 
867  return 0;
868 fail:
869  return AVERROR(ENOMEM);
870 }
871 
877 {
878  int i;
879  int nb_slices = (HAVE_THREADS &&
881  s->avctx->thread_count : 1;
882 
883  if (s->encoding && s->avctx->slices)
884  nb_slices = s->avctx->slices;
885 
887  s->mb_height = (s->height + 31) / 32 * 2;
888  else if (s->codec_id != AV_CODEC_ID_H264)
889  s->mb_height = (s->height + 15) / 16;
890 
891  if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
893  "decoding to AV_PIX_FMT_NONE is not supported.\n");
894  return -1;
895  }
896 
897  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
898  int max_slices;
899  if (s->mb_height)
900  max_slices = FFMIN(MAX_THREADS, s->mb_height);
901  else
902  max_slices = MAX_THREADS;
903  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
904  " reducing to %d\n", nb_slices, max_slices);
905  nb_slices = max_slices;
906  }
907 
908  if ((s->width || s->height) &&
909  av_image_check_size(s->width, s->height, 0, s->avctx))
910  return -1;
911 
913 
914  s->flags = s->avctx->flags;
915  s->flags2 = s->avctx->flags2;
916 
917  /* set chroma shifts */
919  &s->chroma_x_shift,
920  &s->chroma_y_shift);
921 
922  /* convert fourcc to upper case */
924 
926 
927  if (s->width && s->height) {
929 
930  if (s->encoding) {
931  if (s->msmpeg4_version) {
933  2 * 2 * (MAX_LEVEL + 1) *
934  (MAX_RUN + 1) * 2 * sizeof(int), fail);
935  }
936  FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
937 
939  64 * 32 * sizeof(int), fail);
941  64 * 32 * sizeof(int), fail);
943  64 * 32 * 2 * sizeof(uint16_t), fail);
945  64 * 32 * 2 * sizeof(uint16_t), fail);
947  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
949  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
950 
951  if (s->avctx->noise_reduction) {
953  2 * 64 * sizeof(uint16_t), fail);
954  }
955  }
956  }
957 
960  s->picture_count * sizeof(Picture), fail);
961  for (i = 0; i < s->picture_count; i++) {
963  }
964 
965  if (s->width && s->height) {
966  if (init_context_frame(s))
967  goto fail;
968 
969  s->parse_context.state = -1;
970  }
971 
972  s->context_initialized = 1;
973  s->thread_context[0] = s;
974 
975  if (s->width && s->height) {
976  if (nb_slices > 1) {
977  for (i = 1; i < nb_slices; i++) {
978  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
979  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
980  }
981 
982  for (i = 0; i < nb_slices; i++) {
983  if (init_duplicate_context(s->thread_context[i], s) < 0)
984  goto fail;
985  s->thread_context[i]->start_mb_y =
986  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
987  s->thread_context[i]->end_mb_y =
988  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
989  }
990  } else {
991  if (init_duplicate_context(s, s) < 0)
992  goto fail;
993  s->start_mb_y = 0;
994  s->end_mb_y = s->mb_height;
995  }
996  s->slice_context_count = nb_slices;
997  }
998 
999  return 0;
1000  fail:
1001  ff_MPV_common_end(s);
1002  return -1;
1003 }
1004 
1011 {
1012  int i, j, k;
1013 
1014  av_freep(&s->mb_type);
1021  s->p_mv_table = NULL;
1022  s->b_forw_mv_table = NULL;
1023  s->b_back_mv_table = NULL;
1026  s->b_direct_mv_table = NULL;
1027  for (i = 0; i < 2; i++) {
1028  for (j = 0; j < 2; j++) {
1029  for (k = 0; k < 2; k++) {
1030  av_freep(&s->b_field_mv_table_base[i][j][k]);
1031  s->b_field_mv_table[i][j][k] = NULL;
1032  }
1033  av_freep(&s->b_field_select_table[i][j]);
1034  av_freep(&s->p_field_mv_table_base[i][j]);
1035  s->p_field_mv_table[i][j] = NULL;
1036  }
1038  }
1039 
1040  av_freep(&s->dc_val_base);
1042  av_freep(&s->mbintra_table);
1043  av_freep(&s->cbp_table);
1044  av_freep(&s->pred_dir_table);
1045 
1046  av_freep(&s->mbskip_table);
1047 
1049  av_freep(&s->er_temp_buffer);
1050  av_freep(&s->mb_index2xy);
1051  av_freep(&s->lambda_table);
1052  av_freep(&s->cplx_tab);
1053  av_freep(&s->bits_tab);
1054 
1055  s->linesize = s->uvlinesize = 0;
1056 
1057  for (i = 0; i < 3; i++)
1059 
1060  return 0;
1061 }
1062 
1064 {
1065  int i, err = 0;
1066 
1067  if (s->slice_context_count > 1) {
1068  for (i = 0; i < s->slice_context_count; i++) {
1070  }
1071  for (i = 1; i < s->slice_context_count; i++) {
1072  av_freep(&s->thread_context[i]);
1073  }
1074  } else
1076 
1077  free_context_frame(s);
1078 
1079  if (s->picture)
1080  for (i = 0; i < s->picture_count; i++) {
1081  s->picture[i].needs_realloc = 1;
1082  }
1083 
1084  s->last_picture_ptr =
1085  s->next_picture_ptr =
1087 
1088  // init
1090  s->mb_height = (s->height + 31) / 32 * 2;
1091  else if (s->codec_id != AV_CODEC_ID_H264)
1092  s->mb_height = (s->height + 15) / 16;
1093 
1094  if ((s->width || s->height) &&
1095  av_image_check_size(s->width, s->height, 0, s->avctx))
1096  return AVERROR_INVALIDDATA;
1097 
1098  if ((err = init_context_frame(s)))
1099  goto fail;
1100 
1101  s->thread_context[0] = s;
1102 
1103  if (s->width && s->height) {
1104  int nb_slices = s->slice_context_count;
1105  if (nb_slices > 1) {
1106  for (i = 1; i < nb_slices; i++) {
1107  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1108  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1109  }
1110 
1111  for (i = 0; i < nb_slices; i++) {
1112  if (init_duplicate_context(s->thread_context[i], s) < 0)
1113  goto fail;
1114  s->thread_context[i]->start_mb_y =
1115  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1116  s->thread_context[i]->end_mb_y =
1117  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1118  }
1119  } else {
1120  if (init_duplicate_context(s, s) < 0)
1121  goto fail;
1122  s->start_mb_y = 0;
1123  s->end_mb_y = s->mb_height;
1124  }
1125  s->slice_context_count = nb_slices;
1126  }
1127 
1128  return 0;
1129  fail:
1130  ff_MPV_common_end(s);
1131  return err;
1132 }
1133 
1134 /* init common structure for both encoder and decoder */
1136 {
1137  int i;
1138 
1139  if (s->slice_context_count > 1) {
1140  for (i = 0; i < s->slice_context_count; i++) {
1142  }
1143  for (i = 1; i < s->slice_context_count; i++) {
1144  av_freep(&s->thread_context[i]);
1145  }
1146  s->slice_context_count = 1;
1147  } else free_duplicate_context(s);
1148 
1150  s->parse_context.buffer_size = 0;
1151 
1154 
1155  av_freep(&s->avctx->stats_out);
1156  av_freep(&s->ac_stats);
1157 
1158  av_freep(&s->q_intra_matrix);
1159  av_freep(&s->q_inter_matrix);
1162  av_freep(&s->input_picture);
1164  av_freep(&s->dct_offset);
1165 
1166  if (s->picture && !s->avctx->internal->is_copy) {
1167  for (i = 0; i < s->picture_count; i++) {
1168  free_picture(s, &s->picture[i]);
1169  }
1170  }
1171  av_freep(&s->picture);
1172 
1173  free_context_frame(s);
1174 
1177 
1178  s->context_initialized = 0;
1179  s->last_picture_ptr =
1180  s->next_picture_ptr =
1182  s->linesize = s->uvlinesize = 0;
1183 }
1184 
1186  uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1187 {
1188  int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1189  uint8_t index_run[MAX_RUN + 1];
1190  int last, run, level, start, end, i;
1191 
1192  /* If table is static, we can quit if rl->max_level[0] is not NULL */
1193  if (static_store && rl->max_level[0])
1194  return;
1195 
1196  /* compute max_level[], max_run[] and index_run[] */
1197  for (last = 0; last < 2; last++) {
1198  if (last == 0) {
1199  start = 0;
1200  end = rl->last;
1201  } else {
1202  start = rl->last;
1203  end = rl->n;
1204  }
1205 
1206  memset(max_level, 0, MAX_RUN + 1);
1207  memset(max_run, 0, MAX_LEVEL + 1);
1208  memset(index_run, rl->n, MAX_RUN + 1);
1209  for (i = start; i < end; i++) {
1210  run = rl->table_run[i];
1211  level = rl->table_level[i];
1212  if (index_run[run] == rl->n)
1213  index_run[run] = i;
1214  if (level > max_level[run])
1215  max_level[run] = level;
1216  if (run > max_run[level])
1217  max_run[level] = run;
1218  }
1219  if (static_store)
1220  rl->max_level[last] = static_store[last];
1221  else
1222  rl->max_level[last] = av_malloc(MAX_RUN + 1);
1223  memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1224  if (static_store)
1225  rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1226  else
1227  rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1228  memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1229  if (static_store)
1230  rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1231  else
1232  rl->index_run[last] = av_malloc(MAX_RUN + 1);
1233  memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1234  }
1235 }
1236 
1238 {
1239  int i, q;
1240 
1241  for (q = 0; q < 32; q++) {
1242  int qmul = q * 2;
1243  int qadd = (q - 1) | 1;
1244 
1245  if (q == 0) {
1246  qmul = 1;
1247  qadd = 0;
1248  }
1249  for (i = 0; i < rl->vlc.table_size; i++) {
1250  int code = rl->vlc.table[i][0];
1251  int len = rl->vlc.table[i][1];
1252  int level, run;
1253 
1254  if (len == 0) { // illegal code
1255  run = 66;
1256  level = MAX_LEVEL;
1257  } else if (len < 0) { // more bits needed
1258  run = 0;
1259  level = code;
1260  } else {
1261  if (code == rl->n) { // esc
1262  run = 66;
1263  level = 0;
1264  } else {
1265  run = rl->table_run[code] + 1;
1266  level = rl->table_level[code] * qmul + qadd;
1267  if (code >= rl->last) run += 192;
1268  }
1269  }
1270  rl->rl_vlc[q][i].len = len;
1271  rl->rl_vlc[q][i].level = level;
1272  rl->rl_vlc[q][i].run = run;
1273  }
1274  }
1275 }
1276 
1277 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1278 {
1279  int i;
1280 
1281  /* release non reference frames */
1282  for (i = 0; i < s->picture_count; i++) {
1283  if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1284  (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1285  (remove_current || &s->picture[i] != s->current_picture_ptr)
1286  /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1287  free_frame_buffer(s, &s->picture[i]);
1288  }
1289  }
1290 }
1291 
1292 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1293 {
1294  if (pic->f.data[0] == NULL)
1295  return 1;
1296  if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF))
1297  if (!pic->owner2 || pic->owner2 == s)
1298  return 1;
1299  return 0;
1300 }
1301 
1302 static int find_unused_picture(MpegEncContext *s, int shared)
1303 {
1304  int i;
1305 
1306  if (shared) {
1307  for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1308  if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1309  return i;
1310  }
1311  } else {
1312  for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1313  if (pic_is_unused(s, &s->picture[i]) && s->picture[i].f.type != 0)
1314  return i; // FIXME
1315  }
1316  for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1317  if (pic_is_unused(s, &s->picture[i]))
1318  return i;
1319  }
1320  }
1321 
1322  return AVERROR_INVALIDDATA;
1323 }
1324 
1326 {
1327  int ret = find_unused_picture(s, shared);
1328 
1329  if (ret >= 0 && ret < s->picture_range_end) {
1330  if (s->picture[ret].needs_realloc) {
1331  s->picture[ret].needs_realloc = 0;
1332  free_picture(s, &s->picture[ret]);
1334  }
1335  }
1336  return ret;
1337 }
1338 
1340 {
1341  int intra, i;
1342 
1343  for (intra = 0; intra < 2; intra++) {
1344  if (s->dct_count[intra] > (1 << 16)) {
1345  for (i = 0; i < 64; i++) {
1346  s->dct_error_sum[intra][i] >>= 1;
1347  }
1348  s->dct_count[intra] >>= 1;
1349  }
1350 
1351  for (i = 0; i < 64; i++) {
1352  s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1353  s->dct_count[intra] +
1354  s->dct_error_sum[intra][i] / 2) /
1355  (s->dct_error_sum[intra][i] + 1);
1356  }
1357  }
1358 }
1359 
1365 {
1366  int i;
1367  Picture *pic;
1368  s->mb_skipped = 0;
1369 
1370  /* mark & release old frames */
1371  if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
1372  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1374  s->last_picture_ptr->f.data[0]) {
1375  if (s->last_picture_ptr->owner2 == s)
1377  }
1378 
1379  /* release forgotten pictures */
1380  /* if (mpeg124/h263) */
1381  if (!s->encoding) {
1382  for (i = 0; i < s->picture_count; i++) {
1383  if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1384  &s->picture[i] != s->last_picture_ptr &&
1385  &s->picture[i] != s->next_picture_ptr &&
1386  s->picture[i].f.reference && !s->picture[i].needs_realloc) {
1387  if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1388  av_log(avctx, AV_LOG_ERROR,
1389  "releasing zombie picture\n");
1390  free_frame_buffer(s, &s->picture[i]);
1391  }
1392  }
1393  }
1394  }
1395 
1396  if (!s->encoding) {
1398 
1399  if (s->current_picture_ptr &&
1400  s->current_picture_ptr->f.data[0] == NULL) {
1401  // we already have a unused image
1402  // (maybe it was set before reading the header)
1403  pic = s->current_picture_ptr;
1404  } else {
1405  i = ff_find_unused_picture(s, 0);
1406  if (i < 0) {
1407  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1408  return i;
1409  }
1410  pic = &s->picture[i];
1411  }
1412 
1413  pic->f.reference = 0;
1414  if (!s->droppable) {
1415  if (s->codec_id == AV_CODEC_ID_H264)
1416  pic->f.reference = s->picture_structure;
1417  else if (s->pict_type != AV_PICTURE_TYPE_B)
1418  pic->f.reference = 3;
1419  }
1420 
1422 
1423  if (ff_alloc_picture(s, pic, 0) < 0)
1424  return -1;
1425 
1426  s->current_picture_ptr = pic;
1427  // FIXME use only the vars from current_pic
1429  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1431  if (s->picture_structure != PICT_FRAME)
1434  }
1438  }
1439 
1441  // if (s->flags && CODEC_FLAG_QSCALE)
1442  // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1444 
1446 
1447  if (s->pict_type != AV_PICTURE_TYPE_B) {
1449  if (!s->droppable)
1451  }
1452  av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1457  s->pict_type, s->droppable);
1458 
1459  if (s->codec_id != AV_CODEC_ID_H264) {
1460  if ((s->last_picture_ptr == NULL ||
1461  s->last_picture_ptr->f.data[0] == NULL) &&
1462  (s->pict_type != AV_PICTURE_TYPE_I ||
1463  s->picture_structure != PICT_FRAME)) {
1464  if (s->pict_type != AV_PICTURE_TYPE_I)
1465  av_log(avctx, AV_LOG_ERROR,
1466  "warning: first frame is no keyframe\n");
1467  else if (s->picture_structure != PICT_FRAME)
1468  av_log(avctx, AV_LOG_INFO,
1469  "allocate dummy last picture for field based first keyframe\n");
1470 
1471  /* Allocate a dummy frame */
1472  i = ff_find_unused_picture(s, 0);
1473  if (i < 0) {
1474  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1475  return i;
1476  }
1477  s->last_picture_ptr = &s->picture[i];
1478  if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1479  s->last_picture_ptr = NULL;
1480  return -1;
1481  }
1482  ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
1483  ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
1484  s->last_picture_ptr->f.reference = 3;
1485  }
1486  if ((s->next_picture_ptr == NULL ||
1487  s->next_picture_ptr->f.data[0] == NULL) &&
1488  s->pict_type == AV_PICTURE_TYPE_B) {
1489  /* Allocate a dummy frame */
1490  i = ff_find_unused_picture(s, 0);
1491  if (i < 0) {
1492  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1493  return i;
1494  }
1495  s->next_picture_ptr = &s->picture[i];
1496  if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1497  s->next_picture_ptr = NULL;
1498  return -1;
1499  }
1500  ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
1501  ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
1502  s->next_picture_ptr->f.reference = 3;
1503  }
1504  }
1505 
1506  if (s->last_picture_ptr)
1508  if (s->next_picture_ptr)
1510 
1511  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME)) {
1512  if (s->next_picture_ptr)
1513  s->next_picture_ptr->owner2 = s;
1514  if (s->last_picture_ptr)
1515  s->last_picture_ptr->owner2 = s;
1516  }
1517 
1518  assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1519  s->last_picture_ptr->f.data[0]));
1520 
1521  if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
1522  int i;
1523  for (i = 0; i < 4; i++) {
1525  s->current_picture.f.data[i] +=
1526  s->current_picture.f.linesize[i];
1527  }
1528  s->current_picture.f.linesize[i] *= 2;
1529  s->last_picture.f.linesize[i] *= 2;
1530  s->next_picture.f.linesize[i] *= 2;
1531  }
1532  }
1533 
1534  s->err_recognition = avctx->err_recognition;
1535 
1536  /* set dequantizer, we can't do it during init as
1537  * it might change for mpeg4 and we can't do it in the header
1538  * decode as init is not called for mpeg4 there yet */
1539  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1542  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1545  } else {
1548  }
1549 
1550  if (s->dct_error_sum) {
1551  assert(s->avctx->noise_reduction && s->encoding);
1553  }
1554 
1556  return ff_xvmc_field_start(s, avctx);
1557 
1558  return 0;
1559 }
1560 
1561 /* generic function for encode/decode called after a
1562  * frame has been coded/decoded. */
1564 {
1565  int i;
1566  /* redraw edges for the frame if decoding didn't complete */
1567  // just to make sure that all data is rendered.
1569  ff_xvmc_field_end(s);
1570  } else if ((s->error_count || s->encoding) &&
1571  !s->avctx->hwaccel &&
1573  s->unrestricted_mv &&
1575  !s->intra_only &&
1576  !(s->flags & CODEC_FLAG_EMU_EDGE)) {
1578  int hshift = desc->log2_chroma_w;
1579  int vshift = desc->log2_chroma_h;
1581  s->h_edge_pos, s->v_edge_pos,
1583  EDGE_TOP | EDGE_BOTTOM);
1585  s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1586  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1587  EDGE_TOP | EDGE_BOTTOM);
1589  s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1590  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1591  EDGE_TOP | EDGE_BOTTOM);
1592  }
1593 
1594  emms_c();
1595 
1596  s->last_pict_type = s->pict_type;
1598  if (s->pict_type!= AV_PICTURE_TYPE_B) {
1600  }
1601 #if 0
1602  /* copy back current_picture variables */
1603  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1604  if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1605  s->picture[i] = s->current_picture;
1606  break;
1607  }
1608  }
1609  assert(i < MAX_PICTURE_COUNT);
1610 #endif
1611 
1612  if (s->encoding) {
1613  /* release non-reference frames */
1614  for (i = 0; i < s->picture_count; i++) {
1615  if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1616  /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
1617  free_frame_buffer(s, &s->picture[i]);
1618  }
1619  }
1620  }
1621  // clear copies, to avoid confusion
1622 #if 0
1623  memset(&s->last_picture, 0, sizeof(Picture));
1624  memset(&s->next_picture, 0, sizeof(Picture));
1625  memset(&s->current_picture, 0, sizeof(Picture));
1626 #endif
1628 
1631  }
1632 }
1633 
1641 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1642  int w, int h, int stride, int color)
1643 {
1644  int x, y, fr, f;
1645 
1646  sx = av_clip(sx, 0, w - 1);
1647  sy = av_clip(sy, 0, h - 1);
1648  ex = av_clip(ex, 0, w - 1);
1649  ey = av_clip(ey, 0, h - 1);
1650 
1651  buf[sy * stride + sx] += color;
1652 
1653  if (FFABS(ex - sx) > FFABS(ey - sy)) {
1654  if (sx > ex) {
1655  FFSWAP(int, sx, ex);
1656  FFSWAP(int, sy, ey);
1657  }
1658  buf += sx + sy * stride;
1659  ex -= sx;
1660  f = ((ey - sy) << 16) / ex;
1661  for (x = 0; x <= ex; x++) {
1662  y = (x * f) >> 16;
1663  fr = (x * f) & 0xFFFF;
1664  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1665  buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1666  }
1667  } else {
1668  if (sy > ey) {
1669  FFSWAP(int, sx, ex);
1670  FFSWAP(int, sy, ey);
1671  }
1672  buf += sx + sy * stride;
1673  ey -= sy;
1674  if (ey)
1675  f = ((ex - sx) << 16) / ey;
1676  else
1677  f = 0;
1678  for (y = 0; y = ey; y++) {
1679  x = (y * f) >> 16;
1680  fr = (y * f) & 0xFFFF;
1681  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1682  buf[y * stride + x + 1] += (color * fr ) >> 16;
1683  }
1684  }
1685 }
1686 
/**
 * Draw an arrow from (ex, ey) -> (sx, sy): a shaft plus, if the vector
 * is long enough, two barbs at the start point.
 *
 * @param w      width of the image
 * @param h      height of the image
 * @param stride stride/linesize of the image
 * @param color  color of the arrow
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
                       int ey, int w, int h, int stride, int color)
{
    int vx, vy;

    sx = av_clip(sx, -100, w + 100);
    sy = av_clip(sy, -100, h + 100);
    ex = av_clip(ex, -100, w + 100);
    ey = av_clip(ey, -100, h + 100);

    vx = ex - sx;
    vy = ey - sy;

    if (vx * vx + vy * vy > 3 * 3) {
        /* head: two barbs, each the shaft direction rotated by 45 degrees,
         * normalized to a fixed on-screen length */
        int bx  = vx + vy;
        int by  = -vx + vy;
        int len = ff_sqrt((bx * bx + by * by) << 8);

        // FIXME subpixel accuracy
        bx = ROUNDED_DIV(bx * 3 << 4, len);
        by = ROUNDED_DIV(by * 3 << 4, len);

        draw_line(buf, sx, sy, sx + bx, sy + by, w, h, stride, color);
        draw_line(buf, sx, sy, sx - by, sy + bx, w, h, stride, color);
    }
    /* shaft */
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
1721 
1726 {
1727  if (s->avctx->hwaccel || !pict || !pict->mb_type)
1728  return;
1729 
1731  int x,y;
1732 
1733  av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1734  switch (pict->pict_type) {
1735  case AV_PICTURE_TYPE_I:
1736  av_log(s->avctx,AV_LOG_DEBUG,"I\n");
1737  break;
1738  case AV_PICTURE_TYPE_P:
1739  av_log(s->avctx,AV_LOG_DEBUG,"P\n");
1740  break;
1741  case AV_PICTURE_TYPE_B:
1742  av_log(s->avctx,AV_LOG_DEBUG,"B\n");
1743  break;
1744  case AV_PICTURE_TYPE_S:
1745  av_log(s->avctx,AV_LOG_DEBUG,"S\n");
1746  break;
1747  case AV_PICTURE_TYPE_SI:
1748  av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
1749  break;
1750  case AV_PICTURE_TYPE_SP:
1751  av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
1752  break;
1753  }
1754  for (y = 0; y < s->mb_height; y++) {
1755  for (x = 0; x < s->mb_width; x++) {
1756  if (s->avctx->debug & FF_DEBUG_SKIP) {
1757  int count = s->mbskip_table[x + y * s->mb_stride];
1758  if (count > 9)
1759  count = 9;
1760  av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1761  }
1762  if (s->avctx->debug & FF_DEBUG_QP) {
1763  av_log(s->avctx, AV_LOG_DEBUG, "%2d",
1764  pict->qscale_table[x + y * s->mb_stride]);
1765  }
1766  if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
1767  int mb_type = pict->mb_type[x + y * s->mb_stride];
1768  // Type & MV direction
1769  if (IS_PCM(mb_type))
1770  av_log(s->avctx, AV_LOG_DEBUG, "P");
1771  else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1772  av_log(s->avctx, AV_LOG_DEBUG, "A");
1773  else if (IS_INTRA4x4(mb_type))
1774  av_log(s->avctx, AV_LOG_DEBUG, "i");
1775  else if (IS_INTRA16x16(mb_type))
1776  av_log(s->avctx, AV_LOG_DEBUG, "I");
1777  else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1778  av_log(s->avctx, AV_LOG_DEBUG, "d");
1779  else if (IS_DIRECT(mb_type))
1780  av_log(s->avctx, AV_LOG_DEBUG, "D");
1781  else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1782  av_log(s->avctx, AV_LOG_DEBUG, "g");
1783  else if (IS_GMC(mb_type))
1784  av_log(s->avctx, AV_LOG_DEBUG, "G");
1785  else if (IS_SKIP(mb_type))
1786  av_log(s->avctx, AV_LOG_DEBUG, "S");
1787  else if (!USES_LIST(mb_type, 1))
1788  av_log(s->avctx, AV_LOG_DEBUG, ">");
1789  else if (!USES_LIST(mb_type, 0))
1790  av_log(s->avctx, AV_LOG_DEBUG, "<");
1791  else {
1792  assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1793  av_log(s->avctx, AV_LOG_DEBUG, "X");
1794  }
1795 
1796  // segmentation
1797  if (IS_8X8(mb_type))
1798  av_log(s->avctx, AV_LOG_DEBUG, "+");
1799  else if (IS_16X8(mb_type))
1800  av_log(s->avctx, AV_LOG_DEBUG, "-");
1801  else if (IS_8X16(mb_type))
1802  av_log(s->avctx, AV_LOG_DEBUG, "|");
1803  else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1804  av_log(s->avctx, AV_LOG_DEBUG, " ");
1805  else
1806  av_log(s->avctx, AV_LOG_DEBUG, "?");
1807 
1808 
1809  if (IS_INTERLACED(mb_type))
1810  av_log(s->avctx, AV_LOG_DEBUG, "=");
1811  else
1812  av_log(s->avctx, AV_LOG_DEBUG, " ");
1813  }
1814  }
1815  av_log(s->avctx, AV_LOG_DEBUG, "\n");
1816  }
1817  }
1818 
1819  if ((s->avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1820  (s->avctx->debug_mv)) {
1821  const int shift = 1 + s->quarter_sample;
1822  int mb_y;
1823  uint8_t *ptr;
1824  int i;
1825  int h_chroma_shift, v_chroma_shift, block_height;
1826  const int width = s->avctx->width;
1827  const int height = s->avctx->height;
1828  const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1829  const int mv_stride = (s->mb_width << mv_sample_log2) +
1830  (s->codec_id == AV_CODEC_ID_H264 ? 0 : 1);
1831  s->low_delay = 0; // needed to see the vectors without trashing the buffers
1832 
1834  &h_chroma_shift, &v_chroma_shift);
1835  for (i = 0; i < 3; i++) {
1836  memcpy(s->visualization_buffer[i], pict->data[i],
1837  (i == 0) ? pict->linesize[i] * height:
1838  pict->linesize[i] * height >> v_chroma_shift);
1839  pict->data[i] = s->visualization_buffer[i];
1840  }
1841  pict->type = FF_BUFFER_TYPE_COPY;
1842  ptr = pict->data[0];
1843  block_height = 16 >> v_chroma_shift;
1844 
1845  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1846  int mb_x;
1847  for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1848  const int mb_index = mb_x + mb_y * s->mb_stride;
1849  if ((s->avctx->debug_mv) && pict->motion_val) {
1850  int type;
1851  for (type = 0; type < 3; type++) {
1852  int direction = 0;
1853  switch (type) {
1854  case 0:
1855  if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1856  (pict->pict_type!= AV_PICTURE_TYPE_P))
1857  continue;
1858  direction = 0;
1859  break;
1860  case 1:
1861  if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1862  (pict->pict_type!= AV_PICTURE_TYPE_B))
1863  continue;
1864  direction = 0;
1865  break;
1866  case 2:
1867  if ((!(s->avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1868  (pict->pict_type!= AV_PICTURE_TYPE_B))
1869  continue;
1870  direction = 1;
1871  break;
1872  }
1873  if (!USES_LIST(pict->mb_type[mb_index], direction))
1874  continue;
1875 
1876  if (IS_8X8(pict->mb_type[mb_index])) {
1877  int i;
1878  for (i = 0; i < 4; i++) {
1879  int sx = mb_x * 16 + 4 + 8 * (i & 1);
1880  int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1881  int xy = (mb_x * 2 + (i & 1) +
1882  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1883  int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1884  int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1885  draw_arrow(ptr, sx, sy, mx, my, width,
1886  height, s->linesize, 100);
1887  }
1888  } else if (IS_16X8(pict->mb_type[mb_index])) {
1889  int i;
1890  for (i = 0; i < 2; i++) {
1891  int sx = mb_x * 16 + 8;
1892  int sy = mb_y * 16 + 4 + 8 * i;
1893  int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1894  int mx = (pict->motion_val[direction][xy][0] >> shift);
1895  int my = (pict->motion_val[direction][xy][1] >> shift);
1896 
1897  if (IS_INTERLACED(pict->mb_type[mb_index]))
1898  my *= 2;
1899 
1900  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1901  height, s->linesize, 100);
1902  }
1903  } else if (IS_8X16(pict->mb_type[mb_index])) {
1904  int i;
1905  for (i = 0; i < 2; i++) {
1906  int sx = mb_x * 16 + 4 + 8 * i;
1907  int sy = mb_y * 16 + 8;
1908  int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1909  int mx = pict->motion_val[direction][xy][0] >> shift;
1910  int my = pict->motion_val[direction][xy][1] >> shift;
1911 
1912  if (IS_INTERLACED(pict->mb_type[mb_index]))
1913  my *= 2;
1914 
1915  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1916  height, s->linesize, 100);
1917  }
1918  } else {
1919  int sx = mb_x * 16 + 8;
1920  int sy = mb_y * 16 + 8;
1921  int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
1922  int mx = pict->motion_val[direction][xy][0] >> shift + sx;
1923  int my = pict->motion_val[direction][xy][1] >> shift + sy;
1924  draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1925  }
1926  }
1927  }
1928  if ((s->avctx->debug & FF_DEBUG_VIS_QP) && pict->motion_val) {
1929  uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1930  0x0101010101010101ULL;
1931  int y;
1932  for (y = 0; y < block_height; y++) {
1933  *(uint64_t *)(pict->data[1] + 8 * mb_x +
1934  (block_height * mb_y + y) *
1935  pict->linesize[1]) = c;
1936  *(uint64_t *)(pict->data[2] + 8 * mb_x +
1937  (block_height * mb_y + y) *
1938  pict->linesize[2]) = c;
1939  }
1940  }
1941  if ((s->avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
1942  pict->motion_val) {
1943  int mb_type = pict->mb_type[mb_index];
1944  uint64_t u,v;
1945  int y;
1946 #define COLOR(theta, r) \
1947  u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
1948  v = (int)(128 + r * sin(theta * 3.141592 / 180));
1949 
1950 
1951  u = v = 128;
1952  if (IS_PCM(mb_type)) {
1953  COLOR(120, 48)
1954  } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
1955  IS_INTRA16x16(mb_type)) {
1956  COLOR(30, 48)
1957  } else if (IS_INTRA4x4(mb_type)) {
1958  COLOR(90, 48)
1959  } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
1960  // COLOR(120, 48)
1961  } else if (IS_DIRECT(mb_type)) {
1962  COLOR(150, 48)
1963  } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
1964  COLOR(170, 48)
1965  } else if (IS_GMC(mb_type)) {
1966  COLOR(190, 48)
1967  } else if (IS_SKIP(mb_type)) {
1968  // COLOR(180, 48)
1969  } else if (!USES_LIST(mb_type, 1)) {
1970  COLOR(240, 48)
1971  } else if (!USES_LIST(mb_type, 0)) {
1972  COLOR(0, 48)
1973  } else {
1974  assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1975  COLOR(300,48)
1976  }
1977 
1978  u *= 0x0101010101010101ULL;
1979  v *= 0x0101010101010101ULL;
1980  for (y = 0; y < block_height; y++) {
1981  *(uint64_t *)(pict->data[1] + 8 * mb_x +
1982  (block_height * mb_y + y) * pict->linesize[1]) = u;
1983  *(uint64_t *)(pict->data[2] + 8 * mb_x +
1984  (block_height * mb_y + y) * pict->linesize[2]) = v;
1985  }
1986 
1987  // segmentation
1988  if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
1989  *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
1990  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1991  *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
1992  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
1993  }
1994  if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
1995  for (y = 0; y < 16; y++)
1996  pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
1997  pict->linesize[0]] ^= 0x80;
1998  }
1999  if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2000  int dm = 1 << (mv_sample_log2 - 2);
2001  for (i = 0; i < 4; i++) {
2002  int sx = mb_x * 16 + 8 * (i & 1);
2003  int sy = mb_y * 16 + 8 * (i >> 1);
2004  int xy = (mb_x * 2 + (i & 1) +
2005  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2006  // FIXME bidir
2007  int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
2008  if (mv[0] != mv[dm] ||
2009  mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2010  for (y = 0; y < 8; y++)
2011  pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2012  if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2013  *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2014  pict->linesize[0]) ^= 0x8080808080808080ULL;
2015  }
2016  }
2017 
2018  if (IS_INTERLACED(mb_type) &&
2019  s->codec_id == AV_CODEC_ID_H264) {
2020  // hmm
2021  }
2022  }
2023  s->mbskip_table[mb_index] = 0;
2024  }
2025  }
2026  }
2027 }
2028 
2033 {
2034  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2035  int my, off, i, mvs;
2036 
2037  if (s->picture_structure != PICT_FRAME || s->mcsel)
2038  goto unhandled;
2039 
2040  switch (s->mv_type) {
2041  case MV_TYPE_16X16:
2042  mvs = 1;
2043  break;
2044  case MV_TYPE_16X8:
2045  mvs = 2;
2046  break;
2047  case MV_TYPE_8X8:
2048  mvs = 4;
2049  break;
2050  default:
2051  goto unhandled;
2052  }
2053 
2054  for (i = 0; i < mvs; i++) {
2055  my = s->mv[dir][i][1]<<qpel_shift;
2056  my_max = FFMAX(my_max, my);
2057  my_min = FFMIN(my_min, my);
2058  }
2059 
2060  off = (FFMAX(-my_min, my_max) + 63) >> 6;
2061 
2062  return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2063 unhandled:
2064  return s->mb_height-1;
2065 }
2066 
2067 /* put block[] to dest[] */
2068 static inline void put_dct(MpegEncContext *s,
2069  DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2070 {
2071  s->dct_unquantize_intra(s, block, i, qscale);
2072  s->dsp.idct_put (dest, line_size, block);
2073 }
2074 
2075 /* add block[] to dest[] */
2076 static inline void add_dct(MpegEncContext *s,
2077  DCTELEM *block, int i, uint8_t *dest, int line_size)
2078 {
2079  if (s->block_last_index[i] >= 0) {
2080  s->dsp.idct_add (dest, line_size, block);
2081  }
2082 }
2083 
2084 static inline void add_dequant_dct(MpegEncContext *s,
2085  DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
2086 {
2087  if (s->block_last_index[i] >= 0) {
2088  s->dct_unquantize_inter(s, block, i, qscale);
2089 
2090  s->dsp.idct_add (dest, line_size, block);
2091  }
2092 }
2093 
2098 {
2099  int wrap = s->b8_stride;
2100  int xy = s->block_index[0];
2101 
2102  s->dc_val[0][xy ] =
2103  s->dc_val[0][xy + 1 ] =
2104  s->dc_val[0][xy + wrap] =
2105  s->dc_val[0][xy + 1 + wrap] = 1024;
2106  /* ac pred */
2107  memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2108  memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2109  if (s->msmpeg4_version>=3) {
2110  s->coded_block[xy ] =
2111  s->coded_block[xy + 1 ] =
2112  s->coded_block[xy + wrap] =
2113  s->coded_block[xy + 1 + wrap] = 0;
2114  }
2115  /* chroma */
2116  wrap = s->mb_stride;
2117  xy = s->mb_x + s->mb_y * wrap;
2118  s->dc_val[1][xy] =
2119  s->dc_val[2][xy] = 1024;
2120  /* ac pred */
2121  memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2122  memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2123 
2124  s->mbintra_table[xy]= 0;
2125 }
2126 
2127 /* generic function called after a macroblock has been parsed by the
2128  decoder or after it has been encoded by the encoder.
2129 
2130  Important variables used:
2131  s->mb_intra : true if intra macroblock
2132  s->mv_dir : motion vector direction
2133  s->mv_type : motion vector type
2134  s->mv : motion vector
2135  s->interlaced_dct : true if interlaced dct used (mpeg2)
2136  */
2137 static av_always_inline
2139  int is_mpeg12)
2140 {
2141  const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2143  ff_xvmc_decode_mb(s);//xvmc uses pblocks
2144  return;
2145  }
2146 
2147  if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2148  /* save DCT coefficients */
2149  int i,j;
2150  DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2151  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2152  for(i=0; i<6; i++){
2153  for(j=0; j<64; j++){
2154  *dct++ = block[i][s->dsp.idct_permutation[j]];
2155  av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2156  }
2157  av_log(s->avctx, AV_LOG_DEBUG, "\n");
2158  }
2159  }
2160 
2161  s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2162 
2163  /* update DC predictors for P macroblocks */
2164  if (!s->mb_intra) {
2165  if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2166  if(s->mbintra_table[mb_xy])
2168  } else {
2169  s->last_dc[0] =
2170  s->last_dc[1] =
2171  s->last_dc[2] = 128 << s->intra_dc_precision;
2172  }
2173  }
2174  else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2175  s->mbintra_table[mb_xy]=1;
2176 
2177  if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2178  uint8_t *dest_y, *dest_cb, *dest_cr;
2179  int dct_linesize, dct_offset;
2180  op_pixels_func (*op_pix)[4];
2181  qpel_mc_func (*op_qpix)[16];
2182  const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2183  const int uvlinesize = s->current_picture.f.linesize[1];
2184  const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band;
2185  const int block_size = 8;
2186 
2187  /* avoid copy if macroblock skipped in last frame too */
2188  /* skip only during decoding as we might trash the buffers during encoding a bit */
2189  if(!s->encoding){
2190  uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2191 
2192  if (s->mb_skipped) {
2193  s->mb_skipped= 0;
2194  assert(s->pict_type!=AV_PICTURE_TYPE_I);
2195  *mbskip_ptr = 1;
2196  } else if(!s->current_picture.f.reference) {
2197  *mbskip_ptr = 1;
2198  } else{
2199  *mbskip_ptr = 0; /* not skipped */
2200  }
2201  }
2202 
2203  dct_linesize = linesize << s->interlaced_dct;
2204  dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2205 
2206  if(readable){
2207  dest_y= s->dest[0];
2208  dest_cb= s->dest[1];
2209  dest_cr= s->dest[2];
2210  }else{
2211  dest_y = s->b_scratchpad;
2212  dest_cb= s->b_scratchpad+16*linesize;
2213  dest_cr= s->b_scratchpad+32*linesize;
2214  }
2215 
2216  if (!s->mb_intra) {
2217  /* motion handling */
2218  /* decoding or more than one mb_type (MC was already done otherwise) */
2219  if(!s->encoding){
2220 
2222  if (s->mv_dir & MV_DIR_FORWARD) {
2225  0);
2226  }
2227  if (s->mv_dir & MV_DIR_BACKWARD) {
2230  0);
2231  }
2232  }
2233 
2234  op_qpix= s->me.qpel_put;
2235  if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2236  op_pix = s->dsp.put_pixels_tab;
2237  }else{
2238  op_pix = s->dsp.put_no_rnd_pixels_tab;
2239  }
2240  if (s->mv_dir & MV_DIR_FORWARD) {
2241  ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2242  op_pix = s->dsp.avg_pixels_tab;
2243  op_qpix= s->me.qpel_avg;
2244  }
2245  if (s->mv_dir & MV_DIR_BACKWARD) {
2246  ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2247  }
2248  }
2249 
2250  /* skip dequant / idct if we are really late ;) */
2251  if(s->avctx->skip_idct){
2254  || s->avctx->skip_idct >= AVDISCARD_ALL)
2255  goto skip_idct;
2256  }
2257 
2258  /* add dct residue */
2260  || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2261  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2262  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2263  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2264  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2265 
2266  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2267  if (s->chroma_y_shift){
2268  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2269  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2270  }else{
2271  dct_linesize >>= 1;
2272  dct_offset >>=1;
2273  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2274  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2275  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2276  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2277  }
2278  }
2279  } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2280  add_dct(s, block[0], 0, dest_y , dct_linesize);
2281  add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2282  add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2283  add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2284 
2285  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2286  if(s->chroma_y_shift){//Chroma420
2287  add_dct(s, block[4], 4, dest_cb, uvlinesize);
2288  add_dct(s, block[5], 5, dest_cr, uvlinesize);
2289  }else{
2290  //chroma422
2291  dct_linesize = uvlinesize << s->interlaced_dct;
2292  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2293 
2294  add_dct(s, block[4], 4, dest_cb, dct_linesize);
2295  add_dct(s, block[5], 5, dest_cr, dct_linesize);
2296  add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2297  add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2298  if(!s->chroma_x_shift){//Chroma444
2299  add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2300  add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2301  add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2302  add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2303  }
2304  }
2305  }//fi gray
2306  }
2308  ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2309  }
2310  } else {
2311  /* dct only in intra block */
2313  put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2314  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2315  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2316  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2317 
2318  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2319  if(s->chroma_y_shift){
2320  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2321  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2322  }else{
2323  dct_offset >>=1;
2324  dct_linesize >>=1;
2325  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2326  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2327  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2328  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2329  }
2330  }
2331  }else{
2332  s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2333  s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2334  s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2335  s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2336 
2337  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2338  if(s->chroma_y_shift){
2339  s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2340  s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2341  }else{
2342 
2343  dct_linesize = uvlinesize << s->interlaced_dct;
2344  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize * 8;
2345 
2346  s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2347  s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2348  s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2349  s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2350  if(!s->chroma_x_shift){//Chroma444
2351  s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2352  s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2353  s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2354  s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
2355  }
2356  }
2357  }//gray
2358  }
2359  }
2360 skip_idct:
2361  if(!readable){
2362  s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2363  s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2364  s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2365  }
2366  }
2367 }
2368 
/* NOTE(review): the enclosing function's signature (doxygen line 2369,
 * presumably MPV_decode_mb per the call below) was lost in the HTML
 * extraction; only the body survived.
 * Dispatches macroblock decoding to MPV_decode_mb_internal.  When not
 * built with CONFIG_SMALL, a compile-time-specialized variant (third
 * argument a constant 1) is used for FMT_MPEG1 so the compiler can fold
 * the MPEG-1/2 checks inside the internal function. */
2370 #if !CONFIG_SMALL
2371  if(s->out_format == FMT_MPEG1) {
     /* specialized path: constant 1 selects the MPEG-1 variant */
2372  MPV_decode_mb_internal(s, block, 1);
2373  } else
2374 #endif
     /* generic path (and the only path when CONFIG_SMALL is set) */
2375  MPV_decode_mb_internal(s, block, 0);
2376 }
2377 
/**
 * Draw a horizontal band if supported.
 * First pads the picture edges via dsp.draw_edges (when no hwaccel,
 * unrestricted MVs, not intra-only, and EMU_EDGE is off), then invokes the
 * user's draw_horiz_band callback with per-plane byte offsets into the frame.
 *
 * NOTE(review): doxygen lines 2389, 2391, 2394, 2423 and 2430 were dropped
 * by the HTML extraction (they were hyperlinks).  In particular the
 * declaration of 'desc' (used below — presumably an AVPixFmtDescriptor for
 * the current pix_fmt; confirm against the repository) and two if-conditions
 * are missing from this view.
 *
 * @param y start y position of the band (doubled for field pictures)
 * @param h height of the band (doubled for field pictures)
 */
2381 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2382  const int field_pic= s->picture_structure != PICT_FRAME;
2383  if(field_pic){
     /* field pictures: convert field coordinates to frame coordinates */
2384  h <<= 1;
2385  y <<= 1;
2386  }
2387 
2388  if (!s->avctx->hwaccel
     /* NOTE(review): line 2389 missing here (extraction gap) */
2390  && s->unrestricted_mv
     /* NOTE(review): line 2391 missing here (extraction gap) */
2392  && !s->intra_only
2393  && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
     /* NOTE(review): line 2394 missing — the declaration of 'desc' used below */
2395  int sides = 0, edge_h;
2396  int hshift = desc->log2_chroma_w;
2397  int vshift = desc->log2_chroma_h;
     /* only pad the sides of the band that touch the picture border */
2398  if (y==0) sides |= EDGE_TOP;
2399  if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2400 
2401  edge_h= FFMIN(h, s->v_edge_pos - y);
2402 
     /* pad luma, then both chroma planes (chroma scaled by the format's shifts) */
2403  s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize,
2404  s->linesize, s->h_edge_pos, edge_h,
2405  EDGE_WIDTH, EDGE_WIDTH, sides);
2406  s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize,
2407  s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2408  EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2409  s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize,
2410  s->uvlinesize, s->h_edge_pos>>hshift, edge_h>>vshift,
2411  EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2412  }
2413 
     /* clip the band to the visible picture height */
2414  h= FFMIN(h, s->avctx->height - y);
2415 
2416  if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2417 
2418  if (s->avctx->draw_horiz_band) {
2419  AVFrame *src;
2420  int offset[AV_NUM_DATA_POINTERS];
2421  int i;
2422 
     /* NOTE(review): line 2423 missing — the condition selecting the current
      * picture as source (else-branch below falls back to last_picture_ptr) */
2424  src = &s->current_picture_ptr->f;
2425  else if(s->last_picture_ptr)
2426  src = &s->last_picture_ptr->f;
2427  else
2428  return;
2429 
     /* NOTE(review): line 2430 missing — the condition guarding the zero-offset
      * branch below (the else-branch computes real per-plane band offsets) */
2431  for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2432  offset[i] = 0;
2433  }else{
2434  offset[0]= y * s->linesize;
2435  offset[1]=
2436  offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2437  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2438  offset[i] = 0;
2439  }
2440 
     /* leave a clean FPU/MMX state before calling back into user code */
2441  emms_c();
2442 
2443  s->avctx->draw_horiz_band(s->avctx, src, offset,
2444  y, s->picture_structure, h);
2445  }
2446 }
2447 
/**
 * Initialize s->block_index[] and s->dest[] for the macroblock at
 * (s->mb_x, s->mb_y): indices 0-3 address the four luma 8x8 blocks in the
 * b8_stride grid, 4-5 the chroma blocks past the luma area; dest[] points
 * into the current picture's planes (16x16 luma MB, chroma scaled by the
 * chroma shifts).
 */
2448 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2449  const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2450  const int uvlinesize = s->current_picture.f.linesize[1];
2451  const int mb_size= 4; /* log2 of macroblock size (16) */
2452 
2453  s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2454  s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2455  s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2456  s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2457  s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2458  s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2459  //block_index is not used by mpeg2, so it is not affected by chroma_format
2460 
2461  s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2462  s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2463  s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_y_shift));
2464 
     /* NOTE(review): doxygen line 2465 — the if-condition guarding this block —
      * was lost in the HTML extraction; confirm against the repository */
2466  {
2467  if(s->picture_structure==PICT_FRAME){
2468  s->dest[0] += s->mb_y * linesize << mb_size;
2469  s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2470  s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2471  }else{
     /* field picture: only every other MB row belongs to this field */
2472  s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2473  s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2474  s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2475  assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2476  }
2477  }
2478 }
2479 
/* NOTE(review): the function signature (doxygen line 2480, presumably
 * ff_mpeg_flush(AVCodecContext *avctx) given the priv_data access below)
 * was lost in the HTML extraction.
 * Releases all internally/user-owned picture buffers and resets the
 * decoder's positional and parsing state so decoding can restart cleanly
 * (e.g. after a seek). */
2481  int i;
2482  MpegEncContext *s = avctx->priv_data;
2483 
2484  if(s==NULL || s->picture==NULL)
2485  return;
2486 
2487  for(i=0; i<s->picture_count; i++){
2488  if (s->picture[i].f.data[0] &&
2489  (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2490  s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2491  free_frame_buffer(s, &s->picture[i]);
2492  }
     /* NOTE(review): line 2493 missing here — presumably resets the
      * current/last/next picture pointers; confirm against the repository */
2494 
2495  s->mb_x= s->mb_y= 0;
2496 
2497  s->parse_context.state= -1;
     /* NOTE(review): line 2498 missing (extraction gap) */
2499  s->parse_context.overread= 0;
     /* NOTE(review): line 2500 missing (extraction gap) */
2501  s->parse_context.index= 0;
2502  s->parse_context.last_index= 0;
2503  s->bitstream_buffer_size=0;
2504  s->pp_time=0;
2505 }
2506 
/* NOTE(review): the first signature line (doxygen 2507) was lost in the
 * extraction; by forward-declaration order in the file header this is
 * presumably dct_unquantize_mpeg1_intra_c — confirm.
 * MPEG-1 intra dequantization: DC (block[0]) is scaled by the luma/chroma
 * DC scale (n<4 selects the four luma blocks); each AC coefficient is
 * scaled by qscale and the intra matrix (>>3) and forced odd via
 * (level-1)|1, preserving sign. */
2508  DCTELEM *block, int n, int qscale)
2509 {
2510  int i, level, nCoeffs;
2511  const uint16_t *quant_matrix;
2512 
2513  nCoeffs= s->block_last_index[n];
2514 
2515  if (n < 4)
2516  block[0] = block[0] * s->y_dc_scale;
2517  else
2518  block[0] = block[0] * s->c_dc_scale;
2519  /* XXX: only mpeg1 */
2520  quant_matrix = s->intra_matrix;
2521  for(i=1;i<=nCoeffs;i++) {
2522  int j= s->intra_scantable.permutated[i];
2523  level = block[j];
2524  if (level) {
2525  if (level < 0) {
     /* work on the magnitude so the odd-ification rounds toward zero */
2526  level = -level;
2527  level = (int)(level * qscale * quant_matrix[j]) >> 3;
2528  level = (level - 1) | 1; /* force result odd (MPEG-1 oddification) */
2529  level = -level;
2530  } else {
2531  level = (int)(level * qscale * quant_matrix[j]) >> 3;
2532  level = (level - 1) | 1;
2533  }
2534  block[j] = level;
2535  }
2536  }
2537 }
2538 
/* NOTE(review): signature line (doxygen 2539) lost in extraction; by
 * forward-declaration order presumably dct_unquantize_mpeg1_inter_c — confirm.
 * MPEG-1 inter dequantization: every coefficient (including index 0) is
 * scaled as ((2*|level|+1) * qscale * matrix) >> 4 and forced odd,
 * preserving sign.  No DC special case for inter blocks. */
2540  DCTELEM *block, int n, int qscale)
2541 {
2542  int i, level, nCoeffs;
2543  const uint16_t *quant_matrix;
2544 
2545  nCoeffs= s->block_last_index[n];
2546 
2547  quant_matrix = s->inter_matrix;
2548  for(i=0; i<=nCoeffs; i++) {
2549  int j= s->intra_scantable.permutated[i];
2550  level = block[j];
2551  if (level) {
2552  if (level < 0) {
2553  level = -level;
2554  level = (((level << 1) + 1) * qscale *
2555  ((int) (quant_matrix[j]))) >> 4;
2556  level = (level - 1) | 1; /* force result odd (MPEG-1 oddification) */
2557  level = -level;
2558  } else {
2559  level = (((level << 1) + 1) * qscale *
2560  ((int) (quant_matrix[j]))) >> 4;
2561  level = (level - 1) | 1;
2562  }
2563  block[j] = level;
2564  }
2565  }
2566 }
2567 
/* NOTE(review): signature line (doxygen 2568) lost in extraction; by
 * forward-declaration order presumably dct_unquantize_mpeg2_intra_c — confirm.
 * MPEG-2 intra dequantization: like the MPEG-1 intra version but without
 * the odd-ification step.  With alternate_scan all 63 AC coefficients are
 * processed regardless of block_last_index. */
2569  DCTELEM *block, int n, int qscale)
2570 {
2571  int i, level, nCoeffs;
2572  const uint16_t *quant_matrix;
2573 
2574  if(s->alternate_scan) nCoeffs= 63;
2575  else nCoeffs= s->block_last_index[n];
2576 
2577  if (n < 4)
2578  block[0] = block[0] * s->y_dc_scale;
2579  else
2580  block[0] = block[0] * s->c_dc_scale;
2581  quant_matrix = s->intra_matrix;
2582  for(i=1;i<=nCoeffs;i++) {
2583  int j= s->intra_scantable.permutated[i];
2584  level = block[j];
2585  if (level) {
2586  if (level < 0) {
2587  level = -level;
2588  level = (int)(level * qscale * quant_matrix[j]) >> 3;
2589  level = -level;
2590  } else {
2591  level = (int)(level * qscale * quant_matrix[j]) >> 3;
2592  }
2593  block[j] = level;
2594  }
2595  }
2596 }
2597 
/* NOTE(review): signature line (doxygen 2598) lost in extraction; by
 * forward-declaration order presumably dct_unquantize_mpeg2_intra_bitexact —
 * confirm.
 * Bit-exact variant of the MPEG-2 intra dequantizer: identical scaling,
 * plus MPEG-2 mismatch control — the parity of the sum of all produced
 * levels (seeded with -1, i.e. including the DC term implicitly) is XORed
 * into block[63] at the end. */
2599  DCTELEM *block, int n, int qscale)
2600 {
2601  int i, level, nCoeffs;
2602  const uint16_t *quant_matrix;
2603  int sum=-1; /* parity accumulator for mismatch control */
2604 
2605  if(s->alternate_scan) nCoeffs= 63;
2606  else nCoeffs= s->block_last_index[n];
2607 
2608  if (n < 4)
2609  block[0] = block[0] * s->y_dc_scale;
2610  else
2611  block[0] = block[0] * s->c_dc_scale;
2612  quant_matrix = s->intra_matrix;
2613  for(i=1;i<=nCoeffs;i++) {
2614  int j= s->intra_scantable.permutated[i];
2615  level = block[j];
2616  if (level) {
2617  if (level < 0) {
2618  level = -level;
2619  level = (int)(level * qscale * quant_matrix[j]) >> 3;
2620  level = -level;
2621  } else {
2622  level = (int)(level * qscale * quant_matrix[j]) >> 3;
2623  }
2624  block[j] = level;
2625  sum+=level;
2626  }
2627  }
2628  block[63]^=sum&1; /* MPEG-2 mismatch control */
2629 }
2630 
/* NOTE(review): signature line (doxygen 2631) lost in extraction; by
 * forward-declaration order presumably dct_unquantize_mpeg2_inter_c — confirm.
 * MPEG-2 inter dequantization: ((2*|level|+1) * qscale * matrix) >> 4 with
 * sign preserved, no odd-ification, plus mismatch control on block[63]
 * (parity of the level sum, seeded with -1). */
2632  DCTELEM *block, int n, int qscale)
2633 {
2634  int i, level, nCoeffs;
2635  const uint16_t *quant_matrix;
2636  int sum=-1; /* parity accumulator for mismatch control */
2637 
2638  if(s->alternate_scan) nCoeffs= 63;
2639  else nCoeffs= s->block_last_index[n];
2640 
2641  quant_matrix = s->inter_matrix;
2642  for(i=0; i<=nCoeffs; i++) {
2643  int j= s->intra_scantable.permutated[i];
2644  level = block[j];
2645  if (level) {
2646  if (level < 0) {
2647  level = -level;
2648  level = (((level << 1) + 1) * qscale *
2649  ((int) (quant_matrix[j]))) >> 4;
2650  level = -level;
2651  } else {
2652  level = (((level << 1) + 1) * qscale *
2653  ((int) (quant_matrix[j]))) >> 4;
2654  }
2655  block[j] = level;
2656  sum+=level;
2657  }
2658  }
2659  block[63]^=sum&1; /* MPEG-2 mismatch control */
2660 }
2661 
/* NOTE(review): signature line (doxygen 2662) lost in extraction; by
 * forward-declaration order presumably dct_unquantize_h263_intra_c — confirm.
 * H.263-style intra dequantization: level' = level*2*qscale +/- qadd with
 * qadd = (qscale-1)|1, iterating in raster order (no scan permutation).
 * With AC prediction (ac_pred) all 63 AC coefficients must be processed;
 * with advanced intra coding (h263_aic) DC is left unscaled and qadd is 0. */
2663  DCTELEM *block, int n, int qscale)
2664 {
2665  int i, level, qmul, qadd;
2666  int nCoeffs;
2667 
2668  assert(s->block_last_index[n]>=0);
2669 
2670  qmul = qscale << 1;
2671 
2672  if (!s->h263_aic) {
2673  if (n < 4)
2674  block[0] = block[0] * s->y_dc_scale;
2675  else
2676  block[0] = block[0] * s->c_dc_scale;
2677  qadd = (qscale - 1) | 1; /* odd reconstruction offset */
2678  }else{
2679  qadd = 0;
2680  }
2681  if(s->ac_pred)
2682  nCoeffs=63;
2683  else
2684  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2685 
2686  for(i=1; i<=nCoeffs; i++) {
2687  level = block[i];
2688  if (level) {
2689  if (level < 0) {
2690  level = level * qmul - qadd;
2691  } else {
2692  level = level * qmul + qadd;
2693  }
2694  block[i] = level;
2695  }
2696  }
2697 }
2698 
/* NOTE(review): signature line (doxygen 2699) lost in extraction; by
 * forward-declaration order presumably dct_unquantize_h263_inter_c — confirm.
 * H.263-style inter dequantization: level' = level*2*qscale +/- qadd with
 * qadd = (qscale-1)|1, applied to every coefficient from index 0 in raster
 * order (no DC special case, no scan permutation). */
2700  DCTELEM *block, int n, int qscale)
2701 {
2702  int i, level, qmul, qadd;
2703  int nCoeffs;
2704 
2705  assert(s->block_last_index[n]>=0);
2706 
2707  qadd = (qscale - 1) | 1; /* odd reconstruction offset */
2708  qmul = qscale << 1;
2709 
2710  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2711 
2712  for(i=0; i<=nCoeffs; i++) {
2713  level = block[i];
2714  if (level) {
2715  if (level < 0) {
2716  level = level * qmul - qadd;
2717  } else {
2718  level = level * qmul + qadd;
2719  }
2720  block[i] = level;
2721  }
2722  }
2723 }
2724 
/**
 * Set the given MpegEncContext to the given qscale, clamped to [1, 31],
 * and refresh the values derived from it (chroma qscale via the lookup
 * table, luma DC scale).
 *
 * NOTE(review): doxygen line 2739 is missing from this extraction —
 * presumably the matching c_dc_scale update; confirm against the repository.
 */
2728 void ff_set_qscale(MpegEncContext * s, int qscale)
2729 {
2730  if (qscale < 1)
2731  qscale = 1;
2732  else if (qscale > 31)
2733  qscale = 31;
2734 
2735  s->qscale = qscale;
2736  s->chroma_qscale= s->chroma_qscale_table[qscale];
2737 
2738  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2740 }
2741 
/* NOTE(review): only the braces of this function survived the HTML
 * extraction — its signature (doxygen line 2742) and body (lines 2744-2745)
 * were hyperlinks and were dropped.  Restore from the repository. */
2743 {
2746 }