Libav 0.7.1
libavcodec/mpegvideo.c
Go to the documentation of this file.
00001 /*
00002  * The simplest mpeg encoder (well, it was the simplest!)
00003  * Copyright (c) 2000,2001 Fabrice Bellard
00004  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
00005  *
00006  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
00007  *
00008  * This file is part of Libav.
00009  *
00010  * Libav is free software; you can redistribute it and/or
00011  * modify it under the terms of the GNU Lesser General Public
00012  * License as published by the Free Software Foundation; either
00013  * version 2.1 of the License, or (at your option) any later version.
00014  *
00015  * Libav is distributed in the hope that it will be useful,
00016  * but WITHOUT ANY WARRANTY; without even the implied warranty of
00017  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00018  * Lesser General Public License for more details.
00019  *
00020  * You should have received a copy of the GNU Lesser General Public
00021  * License along with Libav; if not, write to the Free Software
00022  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
00023  */
00024 
00030 #include "libavutil/intmath.h"
00031 #include "libavutil/imgutils.h"
00032 #include "avcodec.h"
00033 #include "dsputil.h"
00034 #include "internal.h"
00035 #include "mpegvideo.h"
00036 #include "mpegvideo_common.h"
00037 #include "mjpegenc.h"
00038 #include "msmpeg4.h"
00039 #include "faandct.h"
00040 #include "xvmc_internal.h"
00041 #include "thread.h"
00042 #include <limits.h>
00043 
00044 //#undef NDEBUG
00045 //#include <assert.h>
00046 
/* Forward declarations for the C reference implementations of the
 * dequantizers.  ff_dct_common_init() installs these as the default
 * MpegEncContext function pointers; the arch-specific init functions
 * called from there may replace them with optimized versions. */
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                  DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                  DCTELEM *block, int n, int qscale);
00061 
00062 
00063 /* enable all paranoid tests for rounding, overflows, etc... */
00064 //#define PARANOID
00065 
00066 //#define DEBUG
00067 
00068 
/* Default luma-to-chroma qscale mapping: the identity (chroma uses the
 * same quantizer as luma).  Codecs with a non-trivial mapping install
 * their own table over this one. */
static const uint8_t ff_default_chroma_qscale_table[32]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
};
00073 
/* MPEG-1 intra DC scale: a constant 8 for every qscale (128 entries so
 * it can be indexed by any 7-bit quantizer value). */
const uint8_t ff_mpeg1_dc_scale_table[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
00081 
/* MPEG-2 intra DC scale tables: constant divisors 4, 2 and 1.
 * Together with ff_mpeg1_dc_scale_table (constant 8) they form the four
 * entries of ff_mpeg2_dc_scale_table below — presumably indexed by the
 * stream's intra_dc_precision field (TODO(review): confirm against the
 * MPEG-2 header parser). */
static const uint8_t mpeg2_dc_scale_table1[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};

static const uint8_t mpeg2_dc_scale_table2[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};

static const uint8_t mpeg2_dc_scale_table3[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};

/* One table per DC precision level; entry 0 is the MPEG-1 table. */
const uint8_t * const ff_mpeg2_dc_scale_table[4]={
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
00112 
00113 const enum PixelFormat ff_pixfmt_list_420[] = {
00114     PIX_FMT_YUV420P,
00115     PIX_FMT_NONE
00116 };
00117 
00118 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
00119     PIX_FMT_DXVA2_VLD,
00120     PIX_FMT_VAAPI_VLD,
00121     PIX_FMT_YUV420P,
00122     PIX_FMT_NONE
00123 };
00124 
/**
 * Scan [p, end) for an MPEG-style start code (byte sequence 00 00 01 xx).
 * A rolling 32-bit shift register is carried across calls in *state, so a
 * start code split between two input buffers is still detected.
 *
 * @param p     start of the buffer to scan
 * @param end   one past the last byte of the buffer
 * @param state in/out: last (up to) four bytes seen, newest in the low byte
 * @return pointer just past the start code, or end if none was found
 */
const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
    int i;

    assert(p<=end);
    if(p>=end)
        return end;

    /* Byte-by-byte for the first three bytes: a start code may have
     * begun in the previous buffer.  tmp == 0x100 means the register's
     * previous three bytes were 00 00 01. */
    for(i=0; i<3; i++){
        uint32_t tmp= *state << 8;
        *state= tmp + *(p++);
        if(tmp == 0x100 || p==end)
            return p;
    }

    /* Fast scan: inspect candidate bytes and skip up to 3 positions per
     * iteration; any position that could not be inside a 00 00 01 run
     * is jumped over without reading every byte. */
    while(p<end){
        if     (p[-1] > 1      ) p+= 3;
        else if(p[-2]          ) p+= 2;
        else if(p[-3]|(p[-1]-1)) p++;
        else{
            p++;
            break;
        }
    }

    /* Reload the register from the last four bytes so *state is
     * consistent for the next call, found or not. */
    p= FFMIN(p, end)-4;
    *state= AV_RB32(p);

    return p+4;
}
00154 
/**
 * Initialize the dequantization function pointers and the scantables
 * shared by encoder and decoder.  The C implementations are installed
 * first; the arch-specific init below may override them.
 *
 * @return 0 (cannot fail)
 */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    /* bitexact mode needs the slower but exact mpeg2 intra dequantizer */
    if(s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    /* at most one arch-specific init is compiled in */
#if   HAVE_MMX
    MPV_common_init_mmx(s);
#elif ARCH_ALPHA
    MPV_common_init_axp(s);
#elif CONFIG_MLIB
    MPV_common_init_mlib(s);
#elif HAVE_MMI
    MPV_common_init_mmi(s);
#elif ARCH_ARM
    MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
    MPV_common_init_altivec(s);
#elif ARCH_BFIN
    MPV_common_init_bfin(s);
#endif

    /* load & permutate scantables
       note: only wmv uses different ones
    */
    if(s->alternate_scan){
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    }else{
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
00198 
00199 void ff_copy_picture(Picture *dst, Picture *src){
00200     *dst = *src;
00201     dst->type= FF_BUFFER_TYPE_COPY;
00202 }
00203 
/**
 * Release a frame buffer through the frame-threading API and free any
 * hwaccel private data attached to the picture.
 */
static void free_frame_buffer(MpegEncContext *s, Picture *pic)
{
    ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
    av_freep(&pic->hwaccel_picture_private);
}
00212 
/**
 * Allocate a frame buffer for pic through the frame-threading API and
 * validate what get_buffer() returned.
 *
 * @return 0 on success, -1 on failure (hwaccel private data is freed)
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int r;

    /* hwaccel decoders may need per-picture private data before the
     * buffer itself is requested */
    if (s->avctx->hwaccel) {
        assert(!pic->hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->hwaccel_picture_private) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
        }
    }

    r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);

    /* reject buffers that failed or came back without age/type/data set */
    if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
        av_freep(&pic->hwaccel_picture_private);
        return -1;
    }

    /* strides must stay constant across the stream once established */
    if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    /* both chroma planes must share one stride */
    if (pic->linesize[1] != pic->linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    return 0;
}
00253 
00258 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
00259     const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
00260     const int mb_array_size= s->mb_stride*s->mb_height;
00261     const int b8_array_size= s->b8_stride*s->mb_height*2;
00262     const int b4_array_size= s->b4_stride*s->mb_height*4;
00263     int i;
00264     int r= -1;
00265 
00266     if(shared){
00267         assert(pic->data[0]);
00268         assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
00269         pic->type= FF_BUFFER_TYPE_SHARED;
00270     }else{
00271         assert(!pic->data[0]);
00272 
00273         if (alloc_frame_buffer(s, pic) < 0)
00274             return -1;
00275 
00276         s->linesize  = pic->linesize[0];
00277         s->uvlinesize= pic->linesize[1];
00278     }
00279 
00280     if(pic->qscale_table==NULL){
00281         if (s->encoding) {
00282             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var   , mb_array_size * sizeof(int16_t)  , fail)
00283             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t)  , fail)
00284             FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean  , mb_array_size * sizeof(int8_t )  , fail)
00285         }
00286 
00287         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) //the +2 is for the slice end check
00288         FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base , (big_mb_num + s->mb_stride) * sizeof(uint8_t)  , fail)
00289         FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
00290         pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
00291         pic->qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1;
00292         if(s->out_format == FMT_H264){
00293             for(i=0; i<2; i++){
00294                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4)  * sizeof(int16_t), fail)
00295                 pic->motion_val[i]= pic->motion_val_base[i]+4;
00296                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
00297             }
00298             pic->motion_subsample_log2= 2;
00299         }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
00300             for(i=0; i<2; i++){
00301                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
00302                 pic->motion_val[i]= pic->motion_val_base[i]+4;
00303                 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
00304             }
00305             pic->motion_subsample_log2= 3;
00306         }
00307         if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
00308             FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
00309         }
00310         pic->qstride= s->mb_stride;
00311         FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
00312     }
00313 
00314     /* It might be nicer if the application would keep track of these
00315      * but it would require an API change. */
00316     memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
00317     s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
00318     if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == AV_PICTURE_TYPE_B)
00319         pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
00320     pic->owner2 = NULL;
00321 
00322     return 0;
00323 fail: //for the FF_ALLOCZ_OR_GOTO macro
00324     if(r>=0)
00325         free_frame_buffer(s, pic);
00326     return -1;
00327 }
00328 
00332 static void free_picture(MpegEncContext *s, Picture *pic){
00333     int i;
00334 
00335     if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
00336         free_frame_buffer(s, pic);
00337     }
00338 
00339     av_freep(&pic->mb_var);
00340     av_freep(&pic->mc_mb_var);
00341     av_freep(&pic->mb_mean);
00342     av_freep(&pic->mbskip_table);
00343     av_freep(&pic->qscale_table_base);
00344     av_freep(&pic->mb_type_base);
00345     av_freep(&pic->dct_coeff);
00346     av_freep(&pic->pan_scan);
00347     pic->mb_type= NULL;
00348     for(i=0; i<2; i++){
00349         av_freep(&pic->motion_val_base[i]);
00350         av_freep(&pic->ref_index[i]);
00351     }
00352 
00353     if(pic->type == FF_BUFFER_TYPE_SHARED){
00354         for(i=0; i<4; i++){
00355             pic->base[i]=
00356             pic->data[i]= NULL;
00357         }
00358         pic->type= 0;
00359     }
00360 }
00361 
/**
 * Allocate the per-thread scratch buffers of a context (the parts that
 * are NOT shared between slice threads).  On failure, partially
 * allocated buffers are left in place; they are released later through
 * MPV_common_end() / free_duplicate_context().
 *
 * @return 0 on success, -1 on allocation failure
 */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
    s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;

     //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,  (s->width+64)*4*16*2*sizeof(uint8_t), fail)
    /* the scratchpads alias one allocation — presumably they are never
     * live at the same time (TODO(review): confirm) */
    s->me.temp=         s->me.scratchpad;
    s->rd_scratchpad=   s->me.scratchpad;
    s->b_scratchpad=    s->me.scratchpad;
    s->obmc_scratchpad= s->me.scratchpad + 16;
    if (s->encoding) {
        /* motion-estimation maps, encoder only */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map      , ME_MAP_SIZE*sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
        if(s->avctx->noise_reduction){
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
        }
    }
    /* 12 blocks of 64 coefficients, double-buffered */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
    s->block= s->blocks[0];

    for(i=0;i<12;i++){
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; //free() through MPV_common_end()
}
00404 
00405 static void free_duplicate_context(MpegEncContext *s){
00406     if(s==NULL) return;
00407 
00408     av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
00409     av_freep(&s->me.scratchpad);
00410     s->me.temp=
00411     s->rd_scratchpad=
00412     s->b_scratchpad=
00413     s->obmc_scratchpad= NULL;
00414 
00415     av_freep(&s->dct_error_sum);
00416     av_freep(&s->me.map);
00417     av_freep(&s->me.score_map);
00418     av_freep(&s->blocks);
00419     av_freep(&s->ac_val_base);
00420     s->block= NULL;
00421 }
00422 
00423 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
00424 #define COPY(a) bak->a= src->a
00425     COPY(allocated_edge_emu_buffer);
00426     COPY(edge_emu_buffer);
00427     COPY(me.scratchpad);
00428     COPY(me.temp);
00429     COPY(rd_scratchpad);
00430     COPY(b_scratchpad);
00431     COPY(obmc_scratchpad);
00432     COPY(me.map);
00433     COPY(me.score_map);
00434     COPY(blocks);
00435     COPY(block);
00436     COPY(start_mb_y);
00437     COPY(end_mb_y);
00438     COPY(me.map_generation);
00439     COPY(pb);
00440     COPY(dct_error_sum);
00441     COPY(dct_count[0]);
00442     COPY(dct_count[1]);
00443     COPY(ac_val_base);
00444     COPY(ac_val[0]);
00445     COPY(ac_val[1]);
00446     COPY(ac_val[2]);
00447 #undef COPY
00448 }
00449 
/**
 * Copy src into dst while preserving dst's own per-thread state:
 * the thread-local fields are backed up, the whole context is copied,
 * and the backup is restored on top.  pblocks is re-pointed at dst's
 * own block array afterwards since memcpy left it aiming at src's.
 */
void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
    MpegEncContext bak;
    int i;
    //FIXME copy only needed parts
//START_TIMER
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    for(i=0;i<12;i++){
        dst->pblocks[i] = &dst->block[i];
    }
//STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
}
00463 
00464 int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
00465 {
00466     MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
00467 
00468     if(dst == src || !s1->context_initialized) return 0;
00469 
00470     //FIXME can parameters change on I-frames? in that case dst may need a reinit
00471     if(!s->context_initialized){
00472         memcpy(s, s1, sizeof(MpegEncContext));
00473 
00474         s->avctx                 = dst;
00475         s->picture_range_start  += MAX_PICTURE_COUNT;
00476         s->picture_range_end    += MAX_PICTURE_COUNT;
00477         s->bitstream_buffer      = NULL;
00478         s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
00479 
00480         MPV_common_init(s);
00481     }
00482 
00483     s->avctx->coded_height  = s1->avctx->coded_height;
00484     s->avctx->coded_width   = s1->avctx->coded_width;
00485     s->avctx->width         = s1->avctx->width;
00486     s->avctx->height        = s1->avctx->height;
00487 
00488     s->coded_picture_number = s1->coded_picture_number;
00489     s->picture_number       = s1->picture_number;
00490     s->input_picture_number = s1->input_picture_number;
00491 
00492     memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
00493     memcpy(&s->last_picture, &s1->last_picture, (char*)&s1->last_picture_ptr - (char*)&s1->last_picture);
00494 
00495     s->last_picture_ptr     = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
00496     s->current_picture_ptr  = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
00497     s->next_picture_ptr     = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);
00498 
00499     memcpy(s->prev_pict_types, s1->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
00500 
00501     //Error/bug resilience
00502     s->next_p_frame_damaged = s1->next_p_frame_damaged;
00503     s->workaround_bugs      = s1->workaround_bugs;
00504 
00505     //MPEG4 timing info
00506     memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char*)&s1->shape - (char*)&s1->time_increment_bits);
00507 
00508     //B-frame info
00509     s->max_b_frames         = s1->max_b_frames;
00510     s->low_delay            = s1->low_delay;
00511     s->dropable             = s1->dropable;
00512 
00513     //DivX handling (doesn't work)
00514     s->divx_packed          = s1->divx_packed;
00515 
00516     if(s1->bitstream_buffer){
00517         if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
00518             av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size);
00519         s->bitstream_buffer_size  = s1->bitstream_buffer_size;
00520         memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size);
00521         memset(s->bitstream_buffer+s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
00522     }
00523 
00524     //MPEG2/interlacing info
00525     memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char*)&s1->rtp_mode - (char*)&s1->progressive_sequence);
00526 
00527     if(!s1->first_field){
00528         s->last_pict_type= s1->pict_type;
00529         if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->quality;
00530 
00531         if(s1->pict_type!=FF_B_TYPE){
00532             s->last_non_b_pict_type= s1->pict_type;
00533         }
00534     }
00535 
00536     return 0;
00537 }
00538 
00543 void MPV_common_defaults(MpegEncContext *s){
00544     s->y_dc_scale_table=
00545     s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
00546     s->chroma_qscale_table= ff_default_chroma_qscale_table;
00547     s->progressive_frame= 1;
00548     s->progressive_sequence= 1;
00549     s->picture_structure= PICT_FRAME;
00550 
00551     s->coded_picture_number = 0;
00552     s->picture_number = 0;
00553     s->input_picture_number = 0;
00554 
00555     s->picture_in_gop_number = 0;
00556 
00557     s->f_code = 1;
00558     s->b_code = 1;
00559 
00560     s->picture_range_start = 0;
00561     s->picture_range_end = MAX_PICTURE_COUNT;
00562 }
00563 
/**
 * Set decoder-side MpegEncContext defaults; currently identical to
 * MPV_common_defaults(), kept as a separate entry point.
 */
void MPV_decode_defaults(MpegEncContext *s){
    MPV_common_defaults(s);
}
00571 
00576 av_cold int MPV_common_init(MpegEncContext *s)
00577 {
00578     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;
00579 
00580     if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
00581         s->mb_height = (s->height + 31) / 32 * 2;
00582     else if (s->codec_id != CODEC_ID_H264)
00583         s->mb_height = (s->height + 15) / 16;
00584 
00585     if(s->avctx->pix_fmt == PIX_FMT_NONE){
00586         av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
00587         return -1;
00588     }
00589 
00590     if((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
00591        (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
00592         av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
00593         return -1;
00594     }
00595 
00596     if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
00597         return -1;
00598 
00599     dsputil_init(&s->dsp, s->avctx);
00600     ff_dct_common_init(s);
00601 
00602     s->flags= s->avctx->flags;
00603     s->flags2= s->avctx->flags2;
00604 
00605     if (s->width && s->height) {
00606         s->mb_width  = (s->width  + 15) / 16;
00607         s->mb_stride = s->mb_width + 1;
00608         s->b8_stride = s->mb_width*2 + 1;
00609         s->b4_stride = s->mb_width*4 + 1;
00610         mb_array_size= s->mb_height * s->mb_stride;
00611         mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
00612 
00613         /* set chroma shifts */
00614         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
00615                                       &(s->chroma_y_shift) );
00616 
00617         /* set default edge pos, will be overriden in decode_header if needed */
00618         s->h_edge_pos= s->mb_width*16;
00619         s->v_edge_pos= s->mb_height*16;
00620 
00621         s->mb_num = s->mb_width * s->mb_height;
00622 
00623         s->block_wrap[0]=
00624         s->block_wrap[1]=
00625         s->block_wrap[2]=
00626         s->block_wrap[3]= s->b8_stride;
00627         s->block_wrap[4]=
00628         s->block_wrap[5]= s->mb_stride;
00629 
00630         y_size = s->b8_stride * (2 * s->mb_height + 1);
00631         c_size = s->mb_stride * (s->mb_height + 1);
00632         yc_size = y_size + 2 * c_size;
00633 
00634         /* convert fourcc to upper case */
00635         s->codec_tag = ff_toupper4(s->avctx->codec_tag);
00636 
00637         s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);
00638 
00639         s->avctx->coded_frame= (AVFrame*)&s->current_picture;
00640 
00641         FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
00642         for(y=0; y<s->mb_height; y++){
00643             for(x=0; x<s->mb_width; x++){
00644                 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
00645             }
00646         }
00647         s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
00648 
00649         if (s->encoding) {
00650             /* Allocate MV tables */
00651             FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t), fail)
00652             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
00653             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
00654             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
00655             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
00656             FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t), fail)
00657             s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
00658             s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
00659             s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
00660             s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
00661             s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
00662             s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;
00663 
00664             if(s->msmpeg4_version){
00665                 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
00666             }
00667             FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
00668 
00669             /* Allocate MB type table */
00670             FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type  , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
00671 
00672             FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
00673 
00674             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix  , 64*32   * sizeof(int), fail)
00675             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix  , 64*32   * sizeof(int), fail)
00676             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
00677             FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
00678             FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
00679             FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
00680 
00681             if(s->avctx->noise_reduction){
00682                 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
00683             }
00684         }
00685     }
00686 
00687     s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
00688     FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
00689     for(i = 0; i < s->picture_count; i++) {
00690         avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
00691     }
00692 
00693     if (s->width && s->height) {
00694         FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
00695 
00696         if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
00697             /* interlaced direct mode decoding tables */
00698             for(i=0; i<2; i++){
00699                 int j, k;
00700                 for(j=0; j<2; j++){
00701                     for(k=0; k<2; k++){
00702                         FF_ALLOCZ_OR_GOTO(s->avctx,    s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
00703                         s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
00704                     }
00705                     FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
00706                     FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
00707                     s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
00708                 }
00709                 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
00710             }
00711         }
00712         if (s->out_format == FMT_H263) {
00713             /* cbp values */
00714             FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
00715             s->coded_block= s->coded_block_base + s->b8_stride + 1;
00716 
00717             /* cbp, ac_pred, pred_dir */
00718             FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail)
00719             FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
00720         }
00721 
00722         if (s->h263_pred || s->h263_plus || !s->encoding) {
00723             /* dc values */
00724             //MN: we need these for error resilience of intra-frames
00725             FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
00726             s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
00727             s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
00728             s->dc_val[2] = s->dc_val[1] + c_size;
00729             for(i=0;i<yc_size;i++)
00730                 s->dc_val_base[i] = 1024;
00731         }
00732 
00733         /* which mb is a intra block */
00734         FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
00735         memset(s->mbintra_table, 1, mb_array_size);
00736 
00737         /* init macroblock skip table */
00738         FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
00739         //Note the +1 is for a quicker mpeg4 slice_end detection
00740         FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
00741 
00742         s->parse_context.state= -1;
00743         if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
00744             s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
00745             s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
00746             s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
00747         }
00748     }
00749 
00750     s->context_initialized = 1;
00751     s->thread_context[0]= s;
00752 
00753     if (s->width && s->height) {
00754     if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
00755         threads = s->avctx->thread_count;
00756 
00757         for(i=1; i<threads; i++){
00758             s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
00759             memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
00760         }
00761 
00762         for(i=0; i<threads; i++){
00763             if(init_duplicate_context(s->thread_context[i], s) < 0)
00764                 goto fail;
00765             s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
00766             s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
00767         }
00768     } else {
00769         if(init_duplicate_context(s, s) < 0) goto fail;
00770         s->start_mb_y = 0;
00771         s->end_mb_y   = s->mb_height;
00772     }
00773     }
00774 
00775     return 0;
00776  fail:
00777     MPV_common_end(s);
00778     return -1;
00779 }
00780 
/* free everything allocated by MPV_common_init(); common to encoder and decoder */
void MPV_common_end(MpegEncContext *s)
{
    int i, j, k;

    /* Tear down slice-thread duplicate contexts. thread_context[0] aliases s
       itself, so its duplicate state is freed but the pointer is not. */
    if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
        for(i=0; i<s->avctx->thread_count; i++){
            free_duplicate_context(s->thread_context[i]);
        }
        for(i=1; i<s->avctx->thread_count; i++){
            av_freep(&s->thread_context[i]);
        }
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size=0;

    /* motion-vector tables: free the *_base allocations and clear the
       derived (offset) pointers so stale pointers cannot be dereferenced */
    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table= NULL;
    s->b_forw_mv_table= NULL;
    s->b_back_mv_table= NULL;
    s->b_bidir_forw_mv_table= NULL;
    s->b_bidir_back_mv_table= NULL;
    s->b_direct_mv_table= NULL;
    /* interlaced / field-based MV tables (allocated only for some codecs;
       av_freep() on NULL is a no-op, so unconditional freeing is fine) */
    for(i=0; i<2; i++){
        for(j=0; j<2; j++){
            for(k=0; k<2; k++){
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k]=NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j]=NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    /* per-block prediction / coding state */
    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->prev_pict_types);
    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size=0;

    /* encoder-side statistics and quantization tables */
    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    /* release the picture pool; skipped for frame-thread copies, which do
       not own the per-picture buffers */
    if(s->picture && !s->avctx->is_copy){
        for(i=0; i<s->picture_count; i++){
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    s->context_initialized = 0;
    s->last_picture_ptr=
    s->next_picture_ptr=
    s->current_picture_ptr= NULL;
    s->linesize= s->uvlinesize= 0;

    for(i=0; i<3; i++)
        av_freep(&s->visualization_buffer[i]);

    if(!(s->avctx->active_thread_type&FF_THREAD_FRAME))
        avcodec_default_free_buffers(s->avctx);
}
00866 
00867 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
00868 {
00869     int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
00870     uint8_t index_run[MAX_RUN+1];
00871     int last, run, level, start, end, i;
00872 
00873     /* If table is static, we can quit if rl->max_level[0] is not NULL */
00874     if(static_store && rl->max_level[0])
00875         return;
00876 
00877     /* compute max_level[], max_run[] and index_run[] */
00878     for(last=0;last<2;last++) {
00879         if (last == 0) {
00880             start = 0;
00881             end = rl->last;
00882         } else {
00883             start = rl->last;
00884             end = rl->n;
00885         }
00886 
00887         memset(max_level, 0, MAX_RUN + 1);
00888         memset(max_run, 0, MAX_LEVEL + 1);
00889         memset(index_run, rl->n, MAX_RUN + 1);
00890         for(i=start;i<end;i++) {
00891             run = rl->table_run[i];
00892             level = rl->table_level[i];
00893             if (index_run[run] == rl->n)
00894                 index_run[run] = i;
00895             if (level > max_level[run])
00896                 max_level[run] = level;
00897             if (run > max_run[level])
00898                 max_run[level] = run;
00899         }
00900         if(static_store)
00901             rl->max_level[last] = static_store[last];
00902         else
00903             rl->max_level[last] = av_malloc(MAX_RUN + 1);
00904         memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
00905         if(static_store)
00906             rl->max_run[last] = static_store[last] + MAX_RUN + 1;
00907         else
00908             rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
00909         memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
00910         if(static_store)
00911             rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
00912         else
00913             rl->index_run[last] = av_malloc(MAX_RUN + 1);
00914         memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
00915     }
00916 }
00917 
00918 void init_vlc_rl(RLTable *rl)
00919 {
00920     int i, q;
00921 
00922     for(q=0; q<32; q++){
00923         int qmul= q*2;
00924         int qadd= (q-1)|1;
00925 
00926         if(q==0){
00927             qmul=1;
00928             qadd=0;
00929         }
00930         for(i=0; i<rl->vlc.table_size; i++){
00931             int code= rl->vlc.table[i][0];
00932             int len = rl->vlc.table[i][1];
00933             int level, run;
00934 
00935             if(len==0){ // illegal code
00936                 run= 66;
00937                 level= MAX_LEVEL;
00938             }else if(len<0){ //more bits needed
00939                 run= 0;
00940                 level= code;
00941             }else{
00942                 if(code==rl->n){ //esc
00943                     run= 66;
00944                     level= 0;
00945                 }else{
00946                     run=   rl->table_run  [code] + 1;
00947                     level= rl->table_level[code] * qmul + qadd;
00948                     if(code >= rl->last) run+=192;
00949                 }
00950             }
00951             rl->rl_vlc[q][i].len= len;
00952             rl->rl_vlc[q][i].level= level;
00953             rl->rl_vlc[q][i].run= run;
00954         }
00955     }
00956 }
00957 
00958 void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
00959 {
00960     int i;
00961 
00962     /* release non reference frames */
00963     for(i=0; i<s->picture_count; i++){
00964         if(s->picture[i].data[0] && !s->picture[i].reference
00965            && (!s->picture[i].owner2 || s->picture[i].owner2 == s)
00966            && (remove_current || &s->picture[i] != s->current_picture_ptr)
00967            /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
00968             free_frame_buffer(s, &s->picture[i]);
00969         }
00970     }
00971 }
00972 
00973 int ff_find_unused_picture(MpegEncContext *s, int shared){
00974     int i;
00975 
00976     if(shared){
00977         for(i=s->picture_range_start; i<s->picture_range_end; i++){
00978             if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
00979         }
00980     }else{
00981         for(i=s->picture_range_start; i<s->picture_range_end; i++){
00982             if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
00983         }
00984         for(i=s->picture_range_start; i<s->picture_range_end; i++){
00985             if(s->picture[i].data[0]==NULL) return i;
00986         }
00987     }
00988 
00989     av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
00990     /* We could return -1, but the codec would crash trying to draw into a
00991      * non-existing frame anyway. This is safer than waiting for a random crash.
00992      * Also the return of this is never useful, an encoder must only allocate
00993      * as much as allowed in the specification. This has no relationship to how
00994      * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
00995      * enough for such valid streams).
00996      * Plus, a decoder has to check stream validity and remove frames if too
00997      * many reference frames are around. Waiting for "OOM" is not correct at
00998      * all. Similarly, missing reference frames have to be replaced by
00999      * interpolated/MC frames, anything else is a bug in the codec ...
01000      */
01001     abort();
01002     return -1;
01003 }
01004 
01005 static void update_noise_reduction(MpegEncContext *s){
01006     int intra, i;
01007 
01008     for(intra=0; intra<2; intra++){
01009         if(s->dct_count[intra] > (1<<16)){
01010             for(i=0; i<64; i++){
01011                 s->dct_error_sum[intra][i] >>=1;
01012             }
01013             s->dct_count[intra] >>= 1;
01014         }
01015 
01016         for(i=0; i<64; i++){
01017             s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
01018         }
01019     }
01020 }
01021 
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    Picture *pic;
    s->mb_skipped = 0;

    assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);

    /* mark&release old frames */
    /* A non-B frame replaces the oldest reference: if last != next, the old
       last picture's buffer can be freed now (except in real H.264, which
       manages its own references). */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
      if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
          free_frame_buffer(s, s->last_picture_ptr);

        /* release forgotten pictures */
        /* if(mpeg124/h263) */
        /* Defensive cleanup: any still-allocated reference picture other
           than next_picture should not exist at this point. */
        if(!s->encoding){
            for(i=0; i<s->picture_count; i++){
                if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
                    av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
                    free_frame_buffer(s, &s->picture[i]);
                }
            }
        }
      }
    }

    /* decoder only: pick/allocate the picture the new frame is decoded into */
    if(!s->encoding){
        ff_release_unused_pictures(s, 1);

        if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
            pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
        else{
            i= ff_find_unused_picture(s, 0);
            pic= &s->picture[i];
        }

        /* reference = 3 means "both fields are references"; for H.264 it is
           the picture structure. Droppable frames are never references. */
        pic->reference= 0;
        if (!s->dropable){
            if (s->codec_id == CODEC_ID_H264)
                pic->reference = s->picture_structure;
            else if (s->pict_type != AV_PICTURE_TYPE_B)
                pic->reference = 3;
        }

        pic->coded_picture_number= s->coded_picture_number++;

        if(ff_alloc_picture(s, pic, 0) < 0)
            return -1;

        s->current_picture_ptr= pic;
        //FIXME use only the vars from current_pic
        s->current_picture_ptr->top_field_first= s->top_field_first;
        if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
            /* for field pictures, derive top_field_first from which field
               comes first rather than from the bitstream flag */
            if(s->picture_structure != PICT_FRAME)
                s->current_picture_ptr->top_field_first= (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
        }
        s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
        s->current_picture_ptr->field_picture= s->picture_structure != PICT_FRAME;
    }

    s->current_picture_ptr->pict_type= s->pict_type;
//    if(s->flags && CODEC_FLAG_QSCALE)
  //      s->current_picture_ptr->quality= s->new_picture_ptr->quality;
    s->current_picture_ptr->key_frame= s->pict_type == AV_PICTURE_TYPE_I;

    ff_copy_picture(&s->current_picture, s->current_picture_ptr);

    /* rotate the reference pictures: a non-B frame becomes the new "next",
       the old "next" becomes "last" */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr= s->next_picture_ptr;
        if(!s->dropable)
            s->next_picture_ptr= s->current_picture_ptr;
    }
/*    av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
        s->last_picture_ptr    ? s->last_picture_ptr->data[0] : NULL,
        s->next_picture_ptr    ? s->next_picture_ptr->data[0] : NULL,
        s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
        s->pict_type, s->dropable);*/

    /* Streams that start on a non-keyframe (or on a field-based keyframe)
       would leave last/next picture pointers NULL; allocate dummy frames so
       motion compensation has something to read from. */
    if(s->codec_id != CODEC_ID_H264){
        if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) &&
           (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
            if (s->pict_type != AV_PICTURE_TYPE_I)
                av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
            else if (s->picture_structure != PICT_FRAME)
                av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");

            /* Allocate a dummy frame */
            i= ff_find_unused_picture(s, 0);
            s->last_picture_ptr= &s->picture[i];
            if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
                return -1;
            /* mark both fields complete so frame-threaded waiters never block */
            ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
            ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
        }
        if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==AV_PICTURE_TYPE_B){
            /* Allocate a dummy frame */
            i= ff_find_unused_picture(s, 0);
            s->next_picture_ptr= &s->picture[i];
            if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
                return -1;
            ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
            ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
        }
    }

    if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
    if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);

    assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->data[0]));

    /* Field pictures: address only every second line of the frame buffer by
       doubling the linesizes (and starting one line down for bottom field). */
    if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
        int i;
        for(i=0; i<4; i++){
            if(s->picture_structure == PICT_BOTTOM_FIELD){
                 s->current_picture.data[i] += s->current_picture.linesize[i];
            }
            s->current_picture.linesize[i] *= 2;
            s->last_picture.linesize[i] *=2;
            s->next_picture.linesize[i] *=2;
        }
    }

    s->error_recognition= avctx->error_recognition;

    /* set dequantizer, we can't do it during init as it might change for mpeg4
       and we can't do it in the header decode as init is not called for mpeg4 there yet */
    if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    }else{
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if(s->dct_error_sum){
        assert(s->avctx->noise_reduction && s->encoding);

        update_noise_reduction(s);
    }

    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);

    return 0;
}
01173 
01174 /* generic function for encode/decode called after a frame has been coded/decoded */
01175 void MPV_frame_end(MpegEncContext *s)
01176 {
01177     int i;
01178     /* redraw edges for the frame if decoding didn't complete */
01179     //just to make sure that all data is rendered.
01180     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
01181         ff_xvmc_field_end(s);
01182    }else if((s->error_count || s->encoding)
01183        && !s->avctx->hwaccel
01184        && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
01185        && s->unrestricted_mv
01186        && s->current_picture.reference
01187        && !s->intra_only
01188        && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
01189             int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
01190             int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
01191             s->dsp.draw_edges(s->current_picture.data[0], s->linesize  ,
01192                               s->h_edge_pos             , s->v_edge_pos,
01193                               EDGE_WIDTH        , EDGE_WIDTH        , EDGE_TOP | EDGE_BOTTOM);
01194             s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize,
01195                               s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
01196                               EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
01197             s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize,
01198                               s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
01199                               EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
01200     }
01201 
01202     emms_c();
01203 
01204     s->last_pict_type    = s->pict_type;
01205     s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
01206     if(s->pict_type!=AV_PICTURE_TYPE_B){
01207         s->last_non_b_pict_type= s->pict_type;
01208     }
01209 #if 0
01210         /* copy back current_picture variables */
01211     for(i=0; i<MAX_PICTURE_COUNT; i++){
01212         if(s->picture[i].data[0] == s->current_picture.data[0]){
01213             s->picture[i]= s->current_picture;
01214             break;
01215         }
01216     }
01217     assert(i<MAX_PICTURE_COUNT);
01218 #endif
01219 
01220     if(s->encoding){
01221         /* release non-reference frames */
01222         for(i=0; i<s->picture_count; i++){
01223             if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
01224                 free_frame_buffer(s, &s->picture[i]);
01225             }
01226         }
01227     }
01228     // clear copies, to avoid confusion
01229 #if 0
01230     memset(&s->last_picture, 0, sizeof(Picture));
01231     memset(&s->next_picture, 0, sizeof(Picture));
01232     memset(&s->current_picture, 0, sizeof(Picture));
01233 #endif
01234     s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
01235 
01236     if (s->codec_id != CODEC_ID_H264 && s->current_picture.reference) {
01237         ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
01238     }
01239 }
01240 
/* Draw an anti-aliased line from (sx,sy) to (ex,ey) into a w x h plane of
 * stride 'stride' by *adding* 'color' (split between two neighboring pixels
 * with 16.16 fixed-point weights). Endpoints are clipped to the plane. */
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int x, y, fr, f;

    sx= av_clip(sx, 0, w-1);
    sy= av_clip(sy, 0, h-1);
    ex= av_clip(ex, 0, w-1);
    ey= av_clip(ey, 0, h-1);

    buf[sy*stride + sx]+= color;

    /* choose the major axis so the per-step slope |f| stays <= 1 */
    if(FFABS(ex - sx) > FFABS(ey - sy)){
        /* x-major: normalize to left-to-right (ex-sx > 0 guaranteed here,
           so the division below cannot be by zero) */
        if(sx > ex){
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf+= sx + sy*stride;
        ex-= sx;
        f= ((ey-sy)<<16)/ex;   /* slope in 16.16 fixed point */
        for(x= 0; x <= ex; x++){
            y = (x*f)>>16;     /* integer part: row */
            fr= (x*f)&0xFFFF;  /* fractional part: coverage split */
            buf[ y   *stride + x]+= (color*(0x10000-fr))>>16;
            /* NOTE(review): this writes one row past y when fr != 0; it can
               touch the row below the clipped endpoint — presumably relies on
               the edge padding of the visualization buffer. TODO confirm. */
            buf[(y+1)*stride + x]+= (color*         fr )>>16;
        }
    }else{
        /* y-major: normalize to top-to-bottom */
        if(sy > ey){
            FFSWAP(int, sx, ex);
            FFSWAP(int, sy, ey);
        }
        buf+= sx + sy*stride;
        ey-= sy;
        /* ey may be 0 here (single point / equal |d|), guard the division */
        if(ey) f= ((ex-sx)<<16)/ey;
        else   f= 0;
        for(y= 0; y <= ey; y++){
            x = (y*f)>>16;
            fr= (y*f)&0xFFFF;
            buf[y*stride + x  ]+= (color*(0x10000-fr))>>16;
            /* same one-past write as above, one column to the right */
            buf[y*stride + x+1]+= (color*         fr )>>16;
        }
    }
}
01289 
/* Draw an arrow from (sx,sy) to (ex,ey): a shaft plus, for sufficiently
 * long vectors, two short head strokes at the start point. */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
    int dx, dy;

    /* keep coordinates within a sane margin around the image */
    sx = av_clip(sx, -100, w + 100);
    sy = av_clip(sy, -100, h + 100);
    ex = av_clip(ex, -100, w + 100);
    ey = av_clip(ey, -100, h + 100);

    dx = ex - sx;
    dy = ey - sy;

    /* only draw the head if the vector is longer than 3 pixels */
    if (dx * dx + dy * dy > 3 * 3) {
        int hx  =  dx + dy;  /* (dx,dy) rotated by -45 degrees */
        int hy  = -dx + dy;  /* (dx,dy) rotated by +45 degrees */
        int len = ff_sqrt((hx * hx + hy * hy) << 8);

        //FIXME subpixel accuracy
        hx = ROUNDED_DIV(hx * 3 << 4, len);  /* scale strokes to length 3 */
        hy = ROUNDED_DIV(hy * 3 << 4, len);

        draw_line(buf, sx, sy, sx + hx, sy + hy, w, h, stride, color);
        draw_line(buf, sx, sy, sx - hy, sy + hx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
01322 
01326 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
01327 
01328     if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
01329 
01330     if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
01331         int x,y;
01332 
01333         av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
01334         switch (pict->pict_type) {
01335             case AV_PICTURE_TYPE_I: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
01336             case AV_PICTURE_TYPE_P: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
01337             case AV_PICTURE_TYPE_B: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
01338             case AV_PICTURE_TYPE_S: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
01339             case AV_PICTURE_TYPE_SI: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
01340             case AV_PICTURE_TYPE_SP: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
01341         }
01342         for(y=0; y<s->mb_height; y++){
01343             for(x=0; x<s->mb_width; x++){
01344                 if(s->avctx->debug&FF_DEBUG_SKIP){
01345                     int count= s->mbskip_table[x + y*s->mb_stride];
01346                     if(count>9) count=9;
01347                     av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
01348                 }
01349                 if(s->avctx->debug&FF_DEBUG_QP){
01350                     av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
01351                 }
01352                 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
01353                     int mb_type= pict->mb_type[x + y*s->mb_stride];
01354                     //Type & MV direction
01355                     if(IS_PCM(mb_type))
01356                         av_log(s->avctx, AV_LOG_DEBUG, "P");
01357                     else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
01358                         av_log(s->avctx, AV_LOG_DEBUG, "A");
01359                     else if(IS_INTRA4x4(mb_type))
01360                         av_log(s->avctx, AV_LOG_DEBUG, "i");
01361                     else if(IS_INTRA16x16(mb_type))
01362                         av_log(s->avctx, AV_LOG_DEBUG, "I");
01363                     else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
01364                         av_log(s->avctx, AV_LOG_DEBUG, "d");
01365                     else if(IS_DIRECT(mb_type))
01366                         av_log(s->avctx, AV_LOG_DEBUG, "D");
01367                     else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
01368                         av_log(s->avctx, AV_LOG_DEBUG, "g");
01369                     else if(IS_GMC(mb_type))
01370                         av_log(s->avctx, AV_LOG_DEBUG, "G");
01371                     else if(IS_SKIP(mb_type))
01372                         av_log(s->avctx, AV_LOG_DEBUG, "S");
01373                     else if(!USES_LIST(mb_type, 1))
01374                         av_log(s->avctx, AV_LOG_DEBUG, ">");
01375                     else if(!USES_LIST(mb_type, 0))
01376                         av_log(s->avctx, AV_LOG_DEBUG, "<");
01377                     else{
01378                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01379                         av_log(s->avctx, AV_LOG_DEBUG, "X");
01380                     }
01381 
01382                     //segmentation
01383                     if(IS_8X8(mb_type))
01384                         av_log(s->avctx, AV_LOG_DEBUG, "+");
01385                     else if(IS_16X8(mb_type))
01386                         av_log(s->avctx, AV_LOG_DEBUG, "-");
01387                     else if(IS_8X16(mb_type))
01388                         av_log(s->avctx, AV_LOG_DEBUG, "|");
01389                     else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
01390                         av_log(s->avctx, AV_LOG_DEBUG, " ");
01391                     else
01392                         av_log(s->avctx, AV_LOG_DEBUG, "?");
01393 
01394 
01395                     if(IS_INTERLACED(mb_type))
01396                         av_log(s->avctx, AV_LOG_DEBUG, "=");
01397                     else
01398                         av_log(s->avctx, AV_LOG_DEBUG, " ");
01399                 }
01400 //                av_log(s->avctx, AV_LOG_DEBUG, " ");
01401             }
01402             av_log(s->avctx, AV_LOG_DEBUG, "\n");
01403         }
01404     }
01405 
01406     if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
01407         const int shift= 1 + s->quarter_sample;
01408         int mb_y;
01409         uint8_t *ptr;
01410         int i;
01411         int h_chroma_shift, v_chroma_shift, block_height;
01412         const int width = s->avctx->width;
01413         const int height= s->avctx->height;
01414         const int mv_sample_log2= 4 - pict->motion_subsample_log2;
01415         const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
01416         s->low_delay=0; //needed to see the vectors without trashing the buffers
01417 
01418         avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
01419         for(i=0; i<3; i++){
01420             memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
01421             pict->data[i]= s->visualization_buffer[i];
01422         }
01423         pict->type= FF_BUFFER_TYPE_COPY;
01424         ptr= pict->data[0];
01425         block_height = 16>>v_chroma_shift;
01426 
01427         for(mb_y=0; mb_y<s->mb_height; mb_y++){
01428             int mb_x;
01429             for(mb_x=0; mb_x<s->mb_width; mb_x++){
01430                 const int mb_index= mb_x + mb_y*s->mb_stride;
01431                 if((s->avctx->debug_mv) && pict->motion_val){
01432                   int type;
01433                   for(type=0; type<3; type++){
01434                     int direction = 0;
01435                     switch (type) {
01436                       case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
01437                                 continue;
01438                               direction = 0;
01439                               break;
01440                       case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
01441                                 continue;
01442                               direction = 0;
01443                               break;
01444                       case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
01445                                 continue;
01446                               direction = 1;
01447                               break;
01448                     }
01449                     if(!USES_LIST(pict->mb_type[mb_index], direction))
01450                         continue;
01451 
01452                     if(IS_8X8(pict->mb_type[mb_index])){
01453                       int i;
01454                       for(i=0; i<4; i++){
01455                         int sx= mb_x*16 + 4 + 8*(i&1);
01456                         int sy= mb_y*16 + 4 + 8*(i>>1);
01457                         int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
01458                         int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
01459                         int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
01460                         draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
01461                       }
01462                     }else if(IS_16X8(pict->mb_type[mb_index])){
01463                       int i;
01464                       for(i=0; i<2; i++){
01465                         int sx=mb_x*16 + 8;
01466                         int sy=mb_y*16 + 4 + 8*i;
01467                         int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
01468                         int mx=(pict->motion_val[direction][xy][0]>>shift);
01469                         int my=(pict->motion_val[direction][xy][1]>>shift);
01470 
01471                         if(IS_INTERLACED(pict->mb_type[mb_index]))
01472                             my*=2;
01473 
01474                         draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
01475                       }
01476                     }else if(IS_8X16(pict->mb_type[mb_index])){
01477                       int i;
01478                       for(i=0; i<2; i++){
01479                         int sx=mb_x*16 + 4 + 8*i;
01480                         int sy=mb_y*16 + 8;
01481                         int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
01482                         int mx=(pict->motion_val[direction][xy][0]>>shift);
01483                         int my=(pict->motion_val[direction][xy][1]>>shift);
01484 
01485                         if(IS_INTERLACED(pict->mb_type[mb_index]))
01486                             my*=2;
01487 
01488                         draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
01489                       }
01490                     }else{
01491                       int sx= mb_x*16 + 8;
01492                       int sy= mb_y*16 + 8;
01493                       int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
01494                       int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
01495                       int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
01496                       draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
01497                     }
01498                   }
01499                 }
01500                 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
01501                     uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
01502                     int y;
01503                     for(y=0; y<block_height; y++){
01504                         *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
01505                         *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
01506                     }
01507                 }
01508                 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
01509                     int mb_type= pict->mb_type[mb_index];
01510                     uint64_t u,v;
01511                     int y;
01512 #define COLOR(theta, r)\
01513 u= (int)(128 + r*cos(theta*3.141592/180));\
01514 v= (int)(128 + r*sin(theta*3.141592/180));
01515 
01516 
01517                     u=v=128;
01518                     if(IS_PCM(mb_type)){
01519                         COLOR(120,48)
01520                     }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
01521                         COLOR(30,48)
01522                     }else if(IS_INTRA4x4(mb_type)){
01523                         COLOR(90,48)
01524                     }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
01525 //                        COLOR(120,48)
01526                     }else if(IS_DIRECT(mb_type)){
01527                         COLOR(150,48)
01528                     }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
01529                         COLOR(170,48)
01530                     }else if(IS_GMC(mb_type)){
01531                         COLOR(190,48)
01532                     }else if(IS_SKIP(mb_type)){
01533 //                        COLOR(180,48)
01534                     }else if(!USES_LIST(mb_type, 1)){
01535                         COLOR(240,48)
01536                     }else if(!USES_LIST(mb_type, 0)){
01537                         COLOR(0,48)
01538                     }else{
01539                         assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
01540                         COLOR(300,48)
01541                     }
01542 
01543                     u*= 0x0101010101010101ULL;
01544                     v*= 0x0101010101010101ULL;
01545                     for(y=0; y<block_height; y++){
01546                         *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
01547                         *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
01548                     }
01549 
01550                     //segmentation
01551                     if(IS_8X8(mb_type) || IS_16X8(mb_type)){
01552                         *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
01553                         *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
01554                     }
01555                     if(IS_8X8(mb_type) || IS_8X16(mb_type)){
01556                         for(y=0; y<16; y++)
01557                             pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
01558                     }
01559                     if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
01560                         int dm= 1 << (mv_sample_log2-2);
01561                         for(i=0; i<4; i++){
01562                             int sx= mb_x*16 + 8*(i&1);
01563                             int sy= mb_y*16 + 8*(i>>1);
01564                             int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
01565                             //FIXME bidir
01566                             int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
01567                             if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
01568                                 for(y=0; y<8; y++)
01569                                     pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
01570                             if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
01571                                 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
01572                         }
01573                     }
01574 
01575                     if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
01576                         // hmm
01577                     }
01578                 }
01579                 s->mbskip_table[mb_index]=0;
01580             }
01581         }
01582     }
01583 }
01584 
/**
 * Half-pel motion compensation of a single plane block in lowres mode.
 * The fractional part of the motion vector that survives the lowres
 * downshift is applied via the H.264 chroma MC functions in pix_op
 * (they accept arbitrary sub-pel x/y offsets).
 *
 * @param field_based  1 if src is one field of an interlaced picture
 *                     (halves the effective vertical edge position)
 * @param field_select select the bottom field of src when nonzero
 * @param src_x,src_y  integer block position before the MV is applied
 * @param w,h          block width/height in lowres pixels
 * @param pix_op       table of put/avg MC functions indexed by block size
 * @return 1 if the edge emulation buffer was used, 0 otherwise
 */
static inline int hpel_motion_lowres(MpegEncContext *s,
                                  uint8_t *dest, uint8_t *src,
                                  int field_based, int field_select,
                                  int src_x, int src_y,
                                  int width, int height, int stride,
                                  int h_edge_pos, int v_edge_pos,
                                  int w, int h, h264_chroma_mc_func *pix_op,
                                  int motion_x, int motion_y)
{
    const int lowres= s->avctx->lowres;
    const int op_index= FFMIN(lowres, 2);        // MC table has entries for 8/4/2 wide blocks only
    const int s_mask= (2<<lowres)-1;             // mask of MV bits that remain sub-pel after lowres shift

    int emu=0;
    int sx, sy;

    if(s->quarter_sample){
        // qpel cannot be represented in lowres; degrade to half-pel
        motion_x/=2;
        motion_y/=2;
    }

    // split the MV into integer-pel displacement and sub-pel remainder
    sx= motion_x & s_mask;
    sy= motion_y & s_mask;
    src_x += motion_x >> (lowres+1);
    src_y += motion_y >> (lowres+1);

    src += src_y * stride + src_x;

    // if the (w+1)x(h+1) read area crosses the picture edge, copy it into
    // the emulation buffer with edge pixels replicated
    if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - w
       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
                            src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
        src= s->edge_emu_buffer;
        emu=1;
    }

    // rescale the sub-pel remainder to the 1/8-pel units pix_op expects
    sx= (sx << 2) >> lowres;
    sy= (sy << 2) >> lowres;
    if(field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}
01627 
/**
 * Apply one MPEG motion vector to the three components (lowres variant).
 * Handles frame and field based prediction and the per-format chroma MV
 * derivation (H.263 half-res rounding, H.261 full-pel, MPEG-1/2 half MV).
 *
 * @param field_based   1 for field prediction (doubles the line stride)
 * @param bottom_field  write to the bottom field of the destination
 * @param field_select  read from the bottom field of the reference
 * @param h             block height in lowres luma pixels
 * @param mb_y          macroblock row (caller pre-shifts it for field pics)
 */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
                               int motion_x, int motion_y, int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
    const int lowres= s->avctx->lowres;
    const int op_index= FFMIN(lowres, 2);        // chroma MC table index (8/4/2 wide)
    const int block_s= 8>>lowres;                // chroma block size in lowres pixels
    const int s_mask= (2<<lowres)-1;             // sub-pel bits remaining after lowres shift
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.linesize[0] << field_based;
    uvlinesize = s->current_picture.linesize[1] << field_based;

    if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
        motion_x/=2;
        motion_y/=2;
    }

    if(field_based){
        // compensate for the lowres vertical scaling of the field offset
        motion_y += (bottom_field - field_select)*((1<<lowres)-1);
    }

    sx= motion_x & s_mask;
    sy= motion_y & s_mask;
    src_x = s->mb_x*2*block_s               + (motion_x >> (lowres+1));
    src_y =(   mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));

    if (s->out_format == FMT_H263) {
        // H.263: chroma MV is the luma MV at half resolution, keeping the
        // half-pel bit of the luma vector ORed in
        uvsx = ((motion_x>>1) & s_mask) | (sx&1);
        uvsy = ((motion_y>>1) & s_mask) | (sy&1);
        uvsrc_x = src_x>>1;
        uvsrc_y = src_y>>1;
    }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2*mx) & s_mask;
        uvsy = (2*my) & s_mask;
        uvsrc_x = s->mb_x*block_s               + (mx >> lowres);
        uvsrc_y =    mb_y*block_s               + (my >> lowres);
    } else {
        // MPEG-1/2 style: chroma MV is half the luma MV
        mx = motion_x / 2;
        my = motion_y / 2;
        uvsx = mx & s_mask;
        uvsy = my & s_mask;
        uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
        uvsrc_y =(   mb_y*block_s>>field_based) + (my >> (lowres+1));
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    // edge emulation when the 17x17 luma (9x9 chroma) read area would
    // fall outside the decoded picture
    if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - 2*block_s
       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
                             src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
            ptr_y = s->edge_emu_buffer;
            if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
                s->dsp.emulated_edge_mc(uvbuf  , ptr_cb, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                ptr_cb= uvbuf;
                ptr_cr= uvbuf+16;
            }
    }

    if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
        dest_y += s->linesize;
        dest_cb+= s->uvlinesize;
        dest_cr+= s->uvlinesize;
    }

    if(field_select){
        ptr_y += s->linesize;
        ptr_cb+= s->uvlinesize;
        ptr_cr+= s->uvlinesize;
    }

    // rescale sub-pel offsets to the 1/8-pel units the MC functions expect
    sx= (sx << 2) >> lowres;
    sy= (sy << 2) >> lowres;
    // luma block is twice as wide as chroma, hence a table index one below
    // op_index (lowres >= 1 whenever this function is used)
    pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);

    if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
        uvsx= (uvsx << 2) >> lowres;
        uvsy= (uvsy << 2) >> lowres;
        pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
        pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
    }
    //FIXME h261 lowres loop filter
}
01725 
/**
 * Chroma motion compensation for 4MV (8x8) macroblocks in lowres mode.
 * The four luma vectors are summed by the caller; here the sum is rounded
 * to a single chroma vector and applied to both Cb and Cr.
 *
 * @param mx,my  sum of the four luma motion vectors
 */
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op,
                                     int mx, int my){
    const int lowres= s->avctx->lowres;
    const int op_index= FFMIN(lowres, 2);        // MC table index (8/4/2 wide)
    const int block_s= 8>>lowres;                // chroma block size in lowres pixels
    const int s_mask= (2<<lowres)-1;             // sub-pel bits remaining after lowres shift
    const int h_edge_pos = s->h_edge_pos >> (lowres+1);
    const int v_edge_pos = s->v_edge_pos >> (lowres+1);
    int emu=0, src_x, src_y, offset, sx, sy;
    uint8_t *ptr;

    if(s->quarter_sample){
        // qpel is not representable in lowres; degrade to half-pel
        mx/=2;
        my/=2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx= ff_h263_round_chroma(mx);
    my= ff_h263_round_chroma(my);

    sx= mx & s_mask;
    sy= my & s_mask;
    src_x = s->mb_x*block_s + (mx >> (lowres+1));
    src_y = s->mb_y*block_s + (my >> (lowres+1));

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    // NOTE: unlike the other lowres MC paths, edge emulation here is only
    // done when CODEC_FLAG_EMU_EDGE is set
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(   (unsigned)src_x > h_edge_pos - (!!sx) - block_s
           || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
            ptr= s->edge_emu_buffer;
            emu=1;
        }
    }
    // rescale sub-pel offsets to the 1/8-pel units the MC functions expect
    sx= (sx << 2) >> lowres;
    sy= (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    // Cr uses the same offset; reuse the edge-emulation decision made for Cb
    ptr = ref_picture[2] + offset;
    if(emu){
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
        ptr= s->edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
01776 
/**
 * Motion compensate the current macroblock in lowres mode.
 * Dispatches on s->mv_type (16x16, 8x8, field, 16x8, dual-prime) and applies
 * the motion vectors of one prediction direction to the three components.
 *
 * @param dir          0 = forward prediction (s->mv[0]), 1 = backward
 * @param ref_picture  the three reference planes for this direction
 * @param pix_op       put or avg MC function table (avg for the second
 *                     direction of a B MB)
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                              int dir, uint8_t **ref_picture,
                              h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres= s->avctx->lowres;
    const int block_s= 8>>lowres;       // 8x8 block size in lowres pixels

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch(s->mv_type) {
    case MV_TYPE_16X16:
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                    0, 0, 0,
                    ref_picture, pix_op,
                    s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        // luma: four independent 8x8 vectors; chroma: one averaged vector
        mx = 0;
        my = 0;
            for(i=0;i<4;i++) {
                hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
                            ref_picture[0], 0, 0,
                            (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
                            s->width, s->height, s->linesize,
                            s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                            block_s, block_s, pix_op,
                            s->mv[dir][i][0], s->mv[dir][i][1]);

                mx += s->mv[dir][i][0];
                my += s->mv[dir][i][1];
            }

        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 1, s->field_select[dir][1],
                        ref_picture, pix_op,
                        s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
        } else {
            // field picture predicting from the opposite parity of the
            // current frame: switch to the current picture's planes
            if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
                ref_picture= s->current_picture_ptr->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
        }
        break;
    case MV_TYPE_16X8:
        // two vectors, each covering the top/bottom 16x8 half of the MB
        for(i=0; i<2; i++){
            uint8_t ** ref2picture;

            if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
                ref2picture= ref_picture;
            }else{
                // same-parity reference within the current frame
                ref2picture= s->current_picture_ptr->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);

            dest_y += 2*block_s*s->linesize;
            dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
            dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        // dual prime: average same- and opposite-parity predictions
        if(s->picture_structure == PICT_FRAME){
            for(i=0; i<2; i++){
                int j;
                for(j=0; j<2; j++){
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                1, j, j^i,
                                ref_picture, pix_op,
                                s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
                }
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
            }
        }else{
            for(i=0; i<2; i++){
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                            0, 0, s->picture_structure != i+1,
                            ref_picture, pix_op,
                            s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);

                // after put we make avg of the same block
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                //opposite parity is always in the same frame if this is second field
                if(!s->first_field){
                    ref_picture = s->current_picture_ptr->data;
                }
            }
        }
    break;
    default: assert(0);
    }
}
01902 
01906 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
01907 {
01908     int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
01909     int my, off, i, mvs;
01910 
01911     if (s->picture_structure != PICT_FRAME) goto unhandled;
01912 
01913     switch (s->mv_type) {
01914         case MV_TYPE_16X16:
01915             mvs = 1;
01916             break;
01917         case MV_TYPE_16X8:
01918             mvs = 2;
01919             break;
01920         case MV_TYPE_8X8:
01921             mvs = 4;
01922             break;
01923         default:
01924             goto unhandled;
01925     }
01926 
01927     for (i = 0; i < mvs; i++) {
01928         my = s->mv[dir][i][1]<<qpel_shift;
01929         my_max = FFMAX(my_max, my);
01930         my_min = FFMIN(my_min, my);
01931     }
01932 
01933     off = (FFMAX(-my_min, my_max) + 63) >> 6;
01934 
01935     return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
01936 unhandled:
01937     return s->mb_height-1;
01938 }
01939 
/* put block[] to dest[]: dequantize an intra block with the given qscale,
   then IDCT it and store (not add) the result at dest */
static inline void put_dct(MpegEncContext *s,
                           DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->dsp.idct_put (dest, line_size, block);
}
01947 
01948 /* add block[] to dest[] */
01949 static inline void add_dct(MpegEncContext *s,
01950                            DCTELEM *block, int i, uint8_t *dest, int line_size)
01951 {
01952     if (s->block_last_index[i] >= 0) {
01953         s->dsp.idct_add (dest, line_size, block);
01954     }
01955 }
01956 
01957 static inline void add_dequant_dct(MpegEncContext *s,
01958                            DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
01959 {
01960     if (s->block_last_index[i] >= 0) {
01961         s->dct_unquantize_inter(s, block, i, qscale);
01962 
01963         s->dsp.idct_add (dest, line_size, block);
01964     }
01965 }
01966 
01970 void ff_clean_intra_table_entries(MpegEncContext *s)
01971 {
01972     int wrap = s->b8_stride;
01973     int xy = s->block_index[0];
01974 
01975     s->dc_val[0][xy           ] =
01976     s->dc_val[0][xy + 1       ] =
01977     s->dc_val[0][xy     + wrap] =
01978     s->dc_val[0][xy + 1 + wrap] = 1024;
01979     /* ac pred */
01980     memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
01981     memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
01982     if (s->msmpeg4_version>=3) {
01983         s->coded_block[xy           ] =
01984         s->coded_block[xy + 1       ] =
01985         s->coded_block[xy     + wrap] =
01986         s->coded_block[xy + 1 + wrap] = 0;
01987     }
01988     /* chroma */
01989     wrap = s->mb_stride;
01990     xy = s->mb_x + s->mb_y * wrap;
01991     s->dc_val[1][xy] =
01992     s->dc_val[2][xy] = 1024;
01993     /* ac pred */
01994     memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
01995     memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
01996 
01997     s->mbintra_table[xy]= 0;
01998 }
01999 
02000 /* generic function called after a macroblock has been parsed by the
02001    decoder or after it has been encoded by the encoder.
02002 
02003    Important variables used:
02004    s->mb_intra : true if intra macroblock
02005    s->mv_dir   : motion vector direction
02006    s->mv_type  : motion vector type
02007    s->mv       : motion vector
02008    s->interlaced_dct : true if interlaced dct used (mpeg2)
02009  */
02010 static av_always_inline
02011 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
02012                             int lowres_flag, int is_mpeg12)
02013 {
02014     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
02015     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
02016         ff_xvmc_decode_mb(s);//xvmc uses pblocks
02017         return;
02018     }
02019 
02020     if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
02021        /* save DCT coefficients */
02022        int i,j;
02023        DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
02024        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
02025        for(i=0; i<6; i++){
02026            for(j=0; j<64; j++){
02027                *dct++ = block[i][s->dsp.idct_permutation[j]];
02028                av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
02029            }
02030            av_log(s->avctx, AV_LOG_DEBUG, "\n");
02031        }
02032     }
02033 
02034     s->current_picture.qscale_table[mb_xy]= s->qscale;
02035 
02036     /* update DC predictors for P macroblocks */
02037     if (!s->mb_intra) {
02038         if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
02039             if(s->mbintra_table[mb_xy])
02040                 ff_clean_intra_table_entries(s);
02041         } else {
02042             s->last_dc[0] =
02043             s->last_dc[1] =
02044             s->last_dc[2] = 128 << s->intra_dc_precision;
02045         }
02046     }
02047     else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
02048         s->mbintra_table[mb_xy]=1;
02049 
02050     if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
02051         uint8_t *dest_y, *dest_cb, *dest_cr;
02052         int dct_linesize, dct_offset;
02053         op_pixels_func (*op_pix)[4];
02054         qpel_mc_func (*op_qpix)[16];
02055         const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
02056         const int uvlinesize= s->current_picture.linesize[1];
02057         const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
02058         const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
02059 
02060         /* avoid copy if macroblock skipped in last frame too */
02061         /* skip only during decoding as we might trash the buffers during encoding a bit */
02062         if(!s->encoding){
02063             uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
02064             const int age= s->current_picture.age;
02065 
02066             assert(age);
02067 
02068             if (s->mb_skipped) {
02069                 s->mb_skipped= 0;
02070                 assert(s->pict_type!=AV_PICTURE_TYPE_I);
02071 
02072                 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
02073                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
02074 
02075                 /* if previous was skipped too, then nothing to do !  */
02076                 if (*mbskip_ptr >= age && s->current_picture.reference){
02077                     return;
02078                 }
02079             } else if(!s->current_picture.reference){
02080                 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
02081                 if(*mbskip_ptr >99) *mbskip_ptr= 99;
02082             } else{
02083                 *mbskip_ptr = 0; /* not skipped */
02084             }
02085         }
02086 
02087         dct_linesize = linesize << s->interlaced_dct;
02088         dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
02089 
02090         if(readable){
02091             dest_y=  s->dest[0];
02092             dest_cb= s->dest[1];
02093             dest_cr= s->dest[2];
02094         }else{
02095             dest_y = s->b_scratchpad;
02096             dest_cb= s->b_scratchpad+16*linesize;
02097             dest_cr= s->b_scratchpad+32*linesize;
02098         }
02099 
02100         if (!s->mb_intra) {
02101             /* motion handling */
02102             /* decoding or more than one mb_type (MC was already done otherwise) */
02103             if(!s->encoding){
02104 
02105                 if(HAVE_PTHREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
02106                     if (s->mv_dir & MV_DIR_FORWARD) {
02107                         ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
02108                     }
02109                     if (s->mv_dir & MV_DIR_BACKWARD) {
02110                         ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
02111                     }
02112                 }
02113 
02114                 if(lowres_flag){
02115                     h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
02116 
02117                     if (s->mv_dir & MV_DIR_FORWARD) {
02118                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
02119                         op_pix = s->dsp.avg_h264_chroma_pixels_tab;
02120                     }
02121                     if (s->mv_dir & MV_DIR_BACKWARD) {
02122                         MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
02123                     }
02124                 }else{
02125                     op_qpix= s->me.qpel_put;
02126                     if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
02127                         op_pix = s->dsp.put_pixels_tab;
02128                     }else{
02129                         op_pix = s->dsp.put_no_rnd_pixels_tab;
02130                     }
02131                     if (s->mv_dir & MV_DIR_FORWARD) {
02132                         MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
02133                         op_pix = s->dsp.avg_pixels_tab;
02134                         op_qpix= s->me.qpel_avg;
02135                     }
02136                     if (s->mv_dir & MV_DIR_BACKWARD) {
02137                         MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
02138                     }
02139                 }
02140             }
02141 
02142             /* skip dequant / idct if we are really late ;) */
02143             if(s->avctx->skip_idct){
02144                 if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
02145                    ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
02146                    || s->avctx->skip_idct >= AVDISCARD_ALL)
02147                     goto skip_idct;
02148             }
02149 
02150             /* add dct residue */
02151             if(s->encoding || !(   s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
02152                                 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
02153                 add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
02154                 add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
02155                 add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
02156                 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
02157 
02158                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02159                     if (s->chroma_y_shift){
02160                         add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
02161                         add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
02162                     }else{
02163                         dct_linesize >>= 1;
02164                         dct_offset >>=1;
02165                         add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
02166                         add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
02167                         add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
02168                         add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
02169                     }
02170                 }
02171             } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
02172                 add_dct(s, block[0], 0, dest_y                          , dct_linesize);
02173                 add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
02174                 add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
02175                 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
02176 
02177                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02178                     if(s->chroma_y_shift){//Chroma420
02179                         add_dct(s, block[4], 4, dest_cb, uvlinesize);
02180                         add_dct(s, block[5], 5, dest_cr, uvlinesize);
02181                     }else{
02182                         //chroma422
02183                         dct_linesize = uvlinesize << s->interlaced_dct;
02184                         dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
02185 
02186                         add_dct(s, block[4], 4, dest_cb, dct_linesize);
02187                         add_dct(s, block[5], 5, dest_cr, dct_linesize);
02188                         add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
02189                         add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
02190                         if(!s->chroma_x_shift){//Chroma444
02191                             add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
02192                             add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
02193                             add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
02194                             add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
02195                         }
02196                     }
02197                 }//fi gray
02198             }
02199             else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
02200                 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
02201             }
02202         } else {
02203             /* dct only in intra block */
02204             if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
02205                 put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
02206                 put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
02207                 put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
02208                 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
02209 
02210                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02211                     if(s->chroma_y_shift){
02212                         put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
02213                         put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
02214                     }else{
02215                         dct_offset >>=1;
02216                         dct_linesize >>=1;
02217                         put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
02218                         put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
02219                         put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
02220                         put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
02221                     }
02222                 }
02223             }else{
02224                 s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
02225                 s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
02226                 s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
02227                 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
02228 
02229                 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02230                     if(s->chroma_y_shift){
02231                         s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
02232                         s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
02233                     }else{
02234 
02235                         dct_linesize = uvlinesize << s->interlaced_dct;
02236                         dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
02237 
02238                         s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
02239                         s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
02240                         s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
02241                         s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
02242                         if(!s->chroma_x_shift){//Chroma444
02243                             s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
02244                             s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
02245                             s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
02246                             s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
02247                         }
02248                     }
02249                 }//gray
02250             }
02251         }
02252 skip_idct:
02253         if(!readable){
02254             s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
02255             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
02256             s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
02257         }
02258     }
02259 }
02260 
02261 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
02262 #if !CONFIG_SMALL
02263     if(s->out_format == FMT_MPEG1) {
02264         if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
02265         else                 MPV_decode_mb_internal(s, block, 0, 1);
02266     } else
02267 #endif
02268     if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
02269     else                  MPV_decode_mb_internal(s, block, 0, 0);
02270 }
02271 
02276 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
02277     const int field_pic= s->picture_structure != PICT_FRAME;
02278     if(field_pic){
02279         h <<= 1;
02280         y <<= 1;
02281     }
02282 
02283     if (!s->avctx->hwaccel
02284        && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
02285        && s->unrestricted_mv
02286        && s->current_picture.reference
02287        && !s->intra_only
02288        && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
02289         int sides = 0, edge_h;
02290         int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
02291         int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
02292         if (y==0) sides |= EDGE_TOP;
02293         if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
02294 
02295         edge_h= FFMIN(h, s->v_edge_pos - y);
02296 
02297         s->dsp.draw_edges(s->current_picture_ptr->data[0] +  y         *s->linesize  , s->linesize,
02298                           s->h_edge_pos        , edge_h        , EDGE_WIDTH        , EDGE_WIDTH        , sides);
02299         s->dsp.draw_edges(s->current_picture_ptr->data[1] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
02300                           s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
02301         s->dsp.draw_edges(s->current_picture_ptr->data[2] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
02302                           s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
02303     }
02304 
02305     h= FFMIN(h, s->avctx->height - y);
02306 
02307     if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
02308 
02309     if (s->avctx->draw_horiz_band) {
02310         AVFrame *src;
02311         int offset[4];
02312 
02313         if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
02314             src= (AVFrame*)s->current_picture_ptr;
02315         else if(s->last_picture_ptr)
02316             src= (AVFrame*)s->last_picture_ptr;
02317         else
02318             return;
02319 
02320         if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
02321             offset[0]=
02322             offset[1]=
02323             offset[2]=
02324             offset[3]= 0;
02325         }else{
02326             offset[0]= y * s->linesize;
02327             offset[1]=
02328             offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
02329             offset[3]= 0;
02330         }
02331 
02332         emms_c();
02333 
02334         s->avctx->draw_horiz_band(s->avctx, src, offset,
02335                                   y, s->picture_structure, h);
02336     }
02337 }
02338 
02339 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
02340     const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
02341     const int uvlinesize= s->current_picture.linesize[1];
02342     const int mb_size= 4 - s->avctx->lowres;
02343 
02344     s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
02345     s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
02346     s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
02347     s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
02348     s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02349     s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02350     //block_index is not used by mpeg2, so it is not affected by chroma_format
02351 
02352     s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
02353     s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02354     s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02355 
02356     if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
02357     {
02358         if(s->picture_structure==PICT_FRAME){
02359         s->dest[0] += s->mb_y *   linesize << mb_size;
02360         s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02361         s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02362         }else{
02363             s->dest[0] += (s->mb_y>>1) *   linesize << mb_size;
02364             s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02365             s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02366             assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
02367         }
02368     }
02369 }
02370 
02371 void ff_mpeg_flush(AVCodecContext *avctx){
02372     int i;
02373     MpegEncContext *s = avctx->priv_data;
02374 
02375     if(s==NULL || s->picture==NULL)
02376         return;
02377 
02378     for(i=0; i<s->picture_count; i++){
02379        if(s->picture[i].data[0] && (   s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
02380                                     || s->picture[i].type == FF_BUFFER_TYPE_USER))
02381         free_frame_buffer(s, &s->picture[i]);
02382     }
02383     s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
02384 
02385     s->mb_x= s->mb_y= 0;
02386     s->closed_gop= 0;
02387 
02388     s->parse_context.state= -1;
02389     s->parse_context.frame_start_found= 0;
02390     s->parse_context.overread= 0;
02391     s->parse_context.overread_index= 0;
02392     s->parse_context.index= 0;
02393     s->parse_context.last_index= 0;
02394     s->bitstream_buffer_size=0;
02395     s->pp_time=0;
02396 }
02397 
02398 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
02399                                    DCTELEM *block, int n, int qscale)
02400 {
02401     int i, level, nCoeffs;
02402     const uint16_t *quant_matrix;
02403 
02404     nCoeffs= s->block_last_index[n];
02405 
02406     if (n < 4)
02407         block[0] = block[0] * s->y_dc_scale;
02408     else
02409         block[0] = block[0] * s->c_dc_scale;
02410     /* XXX: only mpeg1 */
02411     quant_matrix = s->intra_matrix;
02412     for(i=1;i<=nCoeffs;i++) {
02413         int j= s->intra_scantable.permutated[i];
02414         level = block[j];
02415         if (level) {
02416             if (level < 0) {
02417                 level = -level;
02418                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02419                 level = (level - 1) | 1;
02420                 level = -level;
02421             } else {
02422                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02423                 level = (level - 1) | 1;
02424             }
02425             block[j] = level;
02426         }
02427     }
02428 }
02429 
02430 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
02431                                    DCTELEM *block, int n, int qscale)
02432 {
02433     int i, level, nCoeffs;
02434     const uint16_t *quant_matrix;
02435 
02436     nCoeffs= s->block_last_index[n];
02437 
02438     quant_matrix = s->inter_matrix;
02439     for(i=0; i<=nCoeffs; i++) {
02440         int j= s->intra_scantable.permutated[i];
02441         level = block[j];
02442         if (level) {
02443             if (level < 0) {
02444                 level = -level;
02445                 level = (((level << 1) + 1) * qscale *
02446                          ((int) (quant_matrix[j]))) >> 4;
02447                 level = (level - 1) | 1;
02448                 level = -level;
02449             } else {
02450                 level = (((level << 1) + 1) * qscale *
02451                          ((int) (quant_matrix[j]))) >> 4;
02452                 level = (level - 1) | 1;
02453             }
02454             block[j] = level;
02455         }
02456     }
02457 }
02458 
02459 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
02460                                    DCTELEM *block, int n, int qscale)
02461 {
02462     int i, level, nCoeffs;
02463     const uint16_t *quant_matrix;
02464 
02465     if(s->alternate_scan) nCoeffs= 63;
02466     else nCoeffs= s->block_last_index[n];
02467 
02468     if (n < 4)
02469         block[0] = block[0] * s->y_dc_scale;
02470     else
02471         block[0] = block[0] * s->c_dc_scale;
02472     quant_matrix = s->intra_matrix;
02473     for(i=1;i<=nCoeffs;i++) {
02474         int j= s->intra_scantable.permutated[i];
02475         level = block[j];
02476         if (level) {
02477             if (level < 0) {
02478                 level = -level;
02479                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02480                 level = -level;
02481             } else {
02482                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02483             }
02484             block[j] = level;
02485         }
02486     }
02487 }
02488 
02489 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
02490                                    DCTELEM *block, int n, int qscale)
02491 {
02492     int i, level, nCoeffs;
02493     const uint16_t *quant_matrix;
02494     int sum=-1;
02495 
02496     if(s->alternate_scan) nCoeffs= 63;
02497     else nCoeffs= s->block_last_index[n];
02498 
02499     if (n < 4)
02500         block[0] = block[0] * s->y_dc_scale;
02501     else
02502         block[0] = block[0] * s->c_dc_scale;
02503     quant_matrix = s->intra_matrix;
02504     for(i=1;i<=nCoeffs;i++) {
02505         int j= s->intra_scantable.permutated[i];
02506         level = block[j];
02507         if (level) {
02508             if (level < 0) {
02509                 level = -level;
02510                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02511                 level = -level;
02512             } else {
02513                 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02514             }
02515             block[j] = level;
02516             sum+=level;
02517         }
02518     }
02519     block[63]^=sum&1;
02520 }
02521 
02522 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
02523                                    DCTELEM *block, int n, int qscale)
02524 {
02525     int i, level, nCoeffs;
02526     const uint16_t *quant_matrix;
02527     int sum=-1;
02528 
02529     if(s->alternate_scan) nCoeffs= 63;
02530     else nCoeffs= s->block_last_index[n];
02531 
02532     quant_matrix = s->inter_matrix;
02533     for(i=0; i<=nCoeffs; i++) {
02534         int j= s->intra_scantable.permutated[i];
02535         level = block[j];
02536         if (level) {
02537             if (level < 0) {
02538                 level = -level;
02539                 level = (((level << 1) + 1) * qscale *
02540                          ((int) (quant_matrix[j]))) >> 4;
02541                 level = -level;
02542             } else {
02543                 level = (((level << 1) + 1) * qscale *
02544                          ((int) (quant_matrix[j]))) >> 4;
02545             }
02546             block[j] = level;
02547             sum+=level;
02548         }
02549     }
02550     block[63]^=sum&1;
02551 }
02552 
02553 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
02554                                   DCTELEM *block, int n, int qscale)
02555 {
02556     int i, level, qmul, qadd;
02557     int nCoeffs;
02558 
02559     assert(s->block_last_index[n]>=0);
02560 
02561     qmul = qscale << 1;
02562 
02563     if (!s->h263_aic) {
02564         if (n < 4)
02565             block[0] = block[0] * s->y_dc_scale;
02566         else
02567             block[0] = block[0] * s->c_dc_scale;
02568         qadd = (qscale - 1) | 1;
02569     }else{
02570         qadd = 0;
02571     }
02572     if(s->ac_pred)
02573         nCoeffs=63;
02574     else
02575         nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02576 
02577     for(i=1; i<=nCoeffs; i++) {
02578         level = block[i];
02579         if (level) {
02580             if (level < 0) {
02581                 level = level * qmul - qadd;
02582             } else {
02583                 level = level * qmul + qadd;
02584             }
02585             block[i] = level;
02586         }
02587     }
02588 }
02589 
02590 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
02591                                   DCTELEM *block, int n, int qscale)
02592 {
02593     int i, level, qmul, qadd;
02594     int nCoeffs;
02595 
02596     assert(s->block_last_index[n]>=0);
02597 
02598     qadd = (qscale - 1) | 1;
02599     qmul = qscale << 1;
02600 
02601     nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02602 
02603     for(i=0; i<=nCoeffs; i++) {
02604         level = block[i];
02605         if (level) {
02606             if (level < 0) {
02607                 level = level * qmul - qadd;
02608             } else {
02609                 level = level * qmul + qadd;
02610             }
02611             block[i] = level;
02612         }
02613     }
02614 }
02615 
02619 void ff_set_qscale(MpegEncContext * s, int qscale)
02620 {
02621     if (qscale < 1)
02622         qscale = 1;
02623     else if (qscale > 31)
02624         qscale = 31;
02625 
02626     s->qscale = qscale;
02627     s->chroma_qscale= s->chroma_qscale_table[qscale];
02628 
02629     s->y_dc_scale= s->y_dc_scale_table[ qscale ];
02630     s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
02631 }
02632 
02633 void MPV_report_decode_progress(MpegEncContext *s)
02634 {
02635     if (s->pict_type != FF_B_TYPE && !s->partitioned_frame && !s->error_occurred)
02636         ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);
02637 }