Libav 0.7.1
ffplay.c
Go to the documentation of this file.
00001 /*
00002  * ffplay : Simple Media Player based on the Libav libraries
00003  * Copyright (c) 2003 Fabrice Bellard
00004  *
00005  * This file is part of Libav.
00006  *
00007  * Libav is free software; you can redistribute it and/or
00008  * modify it under the terms of the GNU Lesser General Public
00009  * License as published by the Free Software Foundation; either
00010  * version 2.1 of the License, or (at your option) any later version.
00011  *
00012  * Libav is distributed in the hope that it will be useful,
00013  * but WITHOUT ANY WARRANTY; without even the implied warranty of
00014  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00015  * Lesser General Public License for more details.
00016  *
00017  * You should have received a copy of the GNU Lesser General Public
00018  * License along with Libav; if not, write to the Free Software
00019  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
00020  */
00021 
00022 #include "config.h"
00023 #include <inttypes.h>
00024 #include <math.h>
00025 #include <limits.h>
00026 #include "libavutil/avstring.h"
00027 #include "libavutil/colorspace.h"
00028 #include "libavutil/pixdesc.h"
00029 #include "libavutil/imgutils.h"
00030 #include "libavutil/dict.h"
00031 #include "libavutil/parseutils.h"
00032 #include "libavutil/samplefmt.h"
00033 #include "libavformat/avformat.h"
00034 #include "libavdevice/avdevice.h"
00035 #include "libswscale/swscale.h"
00036 #include "libavcodec/audioconvert.h"
00037 #include "libavutil/opt.h"
00038 #include "libavcodec/avfft.h"
00039 
00040 #if CONFIG_AVFILTER
00041 # include "libavfilter/avfilter.h"
00042 # include "libavfilter/avfiltergraph.h"
00043 #endif
00044 
00045 #include "cmdutils.h"
00046 
00047 #include <SDL.h>
00048 #include <SDL_thread.h>
00049 
00050 #ifdef __MINGW32__
00051 #undef main /* We don't want SDL to override our main() */
00052 #endif
00053 
00054 #include <unistd.h>
00055 #include <assert.h>
00056 
const char program_name[] = "ffplay";
const int program_birth_year = 2003;

/* hard upper bound on the total bytes buffered across a packet queue */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
/* keep at least this many bytes of audio queued before starting playback */
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

#define FRAME_SKIP_FACTOR 0.05

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

/* scaler quality flags used when converting decoded frames for display */
static int sws_flags = SWS_BICUBIC;
/* Thread-safe FIFO of demuxed packets, shared between the demux thread
   (producer) and a decoder thread (consumer).  cond is signalled when a
   packet is queued or when an abort is requested. */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;           /* total bytes queued: packet payloads + list-node overhead */
    int abort_request;  /* when set, blocked readers wake up and return -1 */
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;
00094 
#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4

/* one entry of the decoded-picture queue awaiting display */
typedef struct VideoPicture {
    double pts;           /* presentation timestamp for this picture */
    double target_clock;  /* clock value at which this picture should be displayed */
    int64_t pos;          /* byte position of the packet this frame came from */
    SDL_Overlay *bmp;
    int width, height; /* source height & width */
    int allocated;     /* true once bmp has been created (see FF_ALLOC_EVENT) */
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref;   /* reference keeping the filtered frame alive */
#endif
} VideoPicture;
00111 
/* a decoded subtitle together with the time it becomes visible */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;
00116 
/* which clock the other streams are slaved to for A/V synchronisation */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
00122 
/* All state for one open media file: demuxer context, per-stream decode
   state, the picture/subtitle queues and the A/V-sync bookkeeping.
   Shared between the parse, video, subtitle, refresh and audio threads. */
typedef struct VideoState {
    SDL_Thread *parse_tid;
    SDL_Thread *video_tid;
    SDL_Thread *refresh_tid;
    AVInputFormat *iformat;
    int no_background;
    int abort_request;          /* set to make all worker threads exit */
    int paused;
    int last_paused;
    int seek_req;               /* a seek has been requested */
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;           /* index of the selected audio stream */

    int av_sync_type;           /* one of the AV_SYNC_* values above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;          /* points at whichever of audio_buf1/2 holds current data */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum AVSampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];   /* ring buffer of recent samples for visualisation */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;          /* real-DFT context for the spectrum display */
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;                   /* current column of the scrolling spectrogram */

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;         /* pts of last decoded frame / predicted pts of next one */
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;   /* pts of the currently displayed frame */
    double video_current_pts_drift; /* video_current_pts minus wall time; extrapolated in get_video_clock() */
    int64_t video_current_pos;  /* byte position of the currently displayed frame */
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;   /* geometry of the display area inside the window */

    PtsCorrectionContext pts_ctx;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;   /* sink end of the video filter chain */
#endif

    float skip_frames;
    float skip_frames_index;
    int refresh;                /* true while an FF_REFRESH_EVENT is pending */
} VideoState;
00217 
static void show_help(void);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
/* per-media-type stream selection; -1 means "pick automatically" */
static int wanted_stream[AVMEDIA_TYPE_NB]={
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop=1;
static int framedrop=1;

static int rdftspeed=20;    /* spectrogram refresh period, milliseconds */
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
/* NOTE(review): appears to hold av_gettime() of the most recent SDL audio
   callback (set outside this chunk); used to refine delay estimates below */
static int64_t audio_callback_time;

/* sentinel packet queued to tell a decoder to flush its internal state */
static AVPacket flush_pkt;

/* custom SDL user events posted to the main event loop */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

/* forward declaration: needed by packet_queue_init() below */
static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
00284 
/* packet queue handling */

/* Initialize a packet queue and prime it with the global flush packet,
   so the consuming decoder flushes its state before the first real packet. */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
    packet_queue_put(q, &flush_pkt);
}
00293 
00294 static void packet_queue_flush(PacketQueue *q)
00295 {
00296     AVPacketList *pkt, *pkt1;
00297 
00298     SDL_LockMutex(q->mutex);
00299     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
00300         pkt1 = pkt->next;
00301         av_free_packet(&pkt->pkt);
00302         av_freep(&pkt);
00303     }
00304     q->last_pkt = NULL;
00305     q->first_pkt = NULL;
00306     q->nb_packets = 0;
00307     q->size = 0;
00308     SDL_UnlockMutex(q->mutex);
00309 }
00310 
/* free all remaining packets and destroy the queue's synchronisation primitives */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
00317 
00318 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
00319 {
00320     AVPacketList *pkt1;
00321 
00322     /* duplicate the packet */
00323     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
00324         return -1;
00325 
00326     pkt1 = av_malloc(sizeof(AVPacketList));
00327     if (!pkt1)
00328         return -1;
00329     pkt1->pkt = *pkt;
00330     pkt1->next = NULL;
00331 
00332 
00333     SDL_LockMutex(q->mutex);
00334 
00335     if (!q->last_pkt)
00336 
00337         q->first_pkt = pkt1;
00338     else
00339         q->last_pkt->next = pkt1;
00340     q->last_pkt = pkt1;
00341     q->nb_packets++;
00342     q->size += pkt1->pkt.size + sizeof(*pkt1);
00343     /* XXX: should duplicate packet data in DV case */
00344     SDL_CondSignal(q->cond);
00345 
00346     SDL_UnlockMutex(q->mutex);
00347     return 0;
00348 }
00349 
00350 static void packet_queue_abort(PacketQueue *q)
00351 {
00352     SDL_LockMutex(q->mutex);
00353 
00354     q->abort_request = 1;
00355 
00356     SDL_CondSignal(q->cond);
00357 
00358     SDL_UnlockMutex(q->mutex);
00359 }
00360 
00361 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
00362 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
00363 {
00364     AVPacketList *pkt1;
00365     int ret;
00366 
00367     SDL_LockMutex(q->mutex);
00368 
00369     for(;;) {
00370         if (q->abort_request) {
00371             ret = -1;
00372             break;
00373         }
00374 
00375         pkt1 = q->first_pkt;
00376         if (pkt1) {
00377             q->first_pkt = pkt1->next;
00378             if (!q->first_pkt)
00379                 q->last_pkt = NULL;
00380             q->nb_packets--;
00381             q->size -= pkt1->pkt.size + sizeof(*pkt1);
00382             *pkt = pkt1->pkt;
00383             av_free(pkt1);
00384             ret = 1;
00385             break;
00386         } else if (!block) {
00387             ret = 0;
00388             break;
00389         } else {
00390             SDL_CondWait(q->cond, q->mutex);
00391         }
00392     }
00393     SDL_UnlockMutex(q->mutex);
00394     return ret;
00395 }
00396 
00397 static inline void fill_rectangle(SDL_Surface *screen,
00398                                   int x, int y, int w, int h, int color)
00399 {
00400     SDL_Rect rect;
00401     rect.x = x;
00402     rect.y = y;
00403     rect.w = w;
00404     rect.h = h;
00405     SDL_FillRect(screen, &rect, color);
00406 }
00407 
/* blend newp over oldp with alpha a; s = number of extra fractional bits
   carried by oldp/newp accumulated sums (0 = single sample, 1 = sum of 2,
   2 = sum of 4) */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* unpack the 32-bit ARGB word at s into separate components */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* look up the palette entry for the 8-bit index at s and unpack it as AYUV */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* pack AYUV components into the 32-bit word at d */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per pixel of the palettized subtitle bitmap */
#define BPP 1
00436 
00437 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
00438 {
00439     int wrap, wrap3, width2, skip2;
00440     int y, u, v, a, u1, v1, a1, w, h;
00441     uint8_t *lum, *cb, *cr;
00442     const uint8_t *p;
00443     const uint32_t *pal;
00444     int dstx, dsty, dstw, dsth;
00445 
00446     dstw = av_clip(rect->w, 0, imgw);
00447     dsth = av_clip(rect->h, 0, imgh);
00448     dstx = av_clip(rect->x, 0, imgw - dstw);
00449     dsty = av_clip(rect->y, 0, imgh - dsth);
00450     lum = dst->data[0] + dsty * dst->linesize[0];
00451     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
00452     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
00453 
00454     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
00455     skip2 = dstx >> 1;
00456     wrap = dst->linesize[0];
00457     wrap3 = rect->pict.linesize[0];
00458     p = rect->pict.data[0];
00459     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
00460 
00461     if (dsty & 1) {
00462         lum += dstx;
00463         cb += skip2;
00464         cr += skip2;
00465 
00466         if (dstx & 1) {
00467             YUVA_IN(y, u, v, a, p, pal);
00468             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00469             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00470             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00471             cb++;
00472             cr++;
00473             lum++;
00474             p += BPP;
00475         }
00476         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
00477             YUVA_IN(y, u, v, a, p, pal);
00478             u1 = u;
00479             v1 = v;
00480             a1 = a;
00481             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00482 
00483             YUVA_IN(y, u, v, a, p + BPP, pal);
00484             u1 += u;
00485             v1 += v;
00486             a1 += a;
00487             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00488             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00489             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00490             cb++;
00491             cr++;
00492             p += 2 * BPP;
00493             lum += 2;
00494         }
00495         if (w) {
00496             YUVA_IN(y, u, v, a, p, pal);
00497             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00498             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00499             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00500             p++;
00501             lum++;
00502         }
00503         p += wrap3 - dstw * BPP;
00504         lum += wrap - dstw - dstx;
00505         cb += dst->linesize[1] - width2 - skip2;
00506         cr += dst->linesize[2] - width2 - skip2;
00507     }
00508     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
00509         lum += dstx;
00510         cb += skip2;
00511         cr += skip2;
00512 
00513         if (dstx & 1) {
00514             YUVA_IN(y, u, v, a, p, pal);
00515             u1 = u;
00516             v1 = v;
00517             a1 = a;
00518             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00519             p += wrap3;
00520             lum += wrap;
00521             YUVA_IN(y, u, v, a, p, pal);
00522             u1 += u;
00523             v1 += v;
00524             a1 += a;
00525             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00526             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00527             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00528             cb++;
00529             cr++;
00530             p += -wrap3 + BPP;
00531             lum += -wrap + 1;
00532         }
00533         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
00534             YUVA_IN(y, u, v, a, p, pal);
00535             u1 = u;
00536             v1 = v;
00537             a1 = a;
00538             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00539 
00540             YUVA_IN(y, u, v, a, p + BPP, pal);
00541             u1 += u;
00542             v1 += v;
00543             a1 += a;
00544             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00545             p += wrap3;
00546             lum += wrap;
00547 
00548             YUVA_IN(y, u, v, a, p, pal);
00549             u1 += u;
00550             v1 += v;
00551             a1 += a;
00552             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00553 
00554             YUVA_IN(y, u, v, a, p + BPP, pal);
00555             u1 += u;
00556             v1 += v;
00557             a1 += a;
00558             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00559 
00560             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
00561             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
00562 
00563             cb++;
00564             cr++;
00565             p += -wrap3 + 2 * BPP;
00566             lum += -wrap + 2;
00567         }
00568         if (w) {
00569             YUVA_IN(y, u, v, a, p, pal);
00570             u1 = u;
00571             v1 = v;
00572             a1 = a;
00573             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00574             p += wrap3;
00575             lum += wrap;
00576             YUVA_IN(y, u, v, a, p, pal);
00577             u1 += u;
00578             v1 += v;
00579             a1 += a;
00580             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00581             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00582             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00583             cb++;
00584             cr++;
00585             p += -wrap3 + BPP;
00586             lum += -wrap + 1;
00587         }
00588         p += wrap3 + (wrap3 - dstw * BPP);
00589         lum += wrap + (wrap - dstw - dstx);
00590         cb += dst->linesize[1] - width2 - skip2;
00591         cr += dst->linesize[2] - width2 - skip2;
00592     }
00593     /* handle odd height */
00594     if (h) {
00595         lum += dstx;
00596         cb += skip2;
00597         cr += skip2;
00598 
00599         if (dstx & 1) {
00600             YUVA_IN(y, u, v, a, p, pal);
00601             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00602             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00603             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00604             cb++;
00605             cr++;
00606             lum++;
00607             p += BPP;
00608         }
00609         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
00610             YUVA_IN(y, u, v, a, p, pal);
00611             u1 = u;
00612             v1 = v;
00613             a1 = a;
00614             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00615 
00616             YUVA_IN(y, u, v, a, p + BPP, pal);
00617             u1 += u;
00618             v1 += v;
00619             a1 += a;
00620             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00621             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
00622             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
00623             cb++;
00624             cr++;
00625             p += 2 * BPP;
00626             lum += 2;
00627         }
00628         if (w) {
00629             YUVA_IN(y, u, v, a, p, pal);
00630             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00631             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00632             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00633         }
00634     }
00635 }
00636 
/* release the decoded subtitle data owned by sp (sp itself is queue storage) */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
00641 
/* Blit the picture at the queue read index to the screen, letterboxed to
   preserve the source aspect ratio, blending in the current subtitle
   (if any) whose display time has been reached. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         if (vp->picref->video->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* turn the sample aspect ratio into a display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* only blend the subtitle once its start time has come */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* planes 1 and 2 are swapped: the overlay's plane order
                       differs from AVPicture's Cb/Cr order */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* fit the picture into the window, centred, keeping aspect ratio;
           & ~1 keeps dimensions even for the 4:2:0 overlay */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        is->no_background = 0;
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    }
}
00718 
/* get the current audio output buffer size, in samples. With SDL, we
   cannot have a precise information */
static int audio_write_get_buf_size(VideoState *is)
{
    /* NOTE(review): despite the comment above this is a BYTE count —
       get_audio_clock() divides it by bytes_per_sec */
    return is->audio_buf_size - is->audio_buf_index;
}
00725 
/* Euclidean modulo: result is always in [0, b) for b > 0, unlike C's
   '%' operator whose sign follows the dividend. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r >= 0 ? r : r + b;
}
00734 
00735 static void video_audio_display(VideoState *s)
00736 {
00737     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
00738     int ch, channels, h, h2, bgcolor, fgcolor;
00739     int16_t time_diff;
00740     int rdft_bits, nb_freq;
00741 
00742     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
00743         ;
00744     nb_freq= 1<<(rdft_bits-1);
00745 
00746     /* compute display index : center on currently output samples */
00747     channels = s->audio_st->codec->channels;
00748     nb_display_channels = channels;
00749     if (!s->paused) {
00750         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
00751         n = 2 * channels;
00752         delay = audio_write_get_buf_size(s);
00753         delay /= n;
00754 
00755         /* to be more precise, we take into account the time spent since
00756            the last buffer computation */
00757         if (audio_callback_time) {
00758             time_diff = av_gettime() - audio_callback_time;
00759             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
00760         }
00761 
00762         delay += 2*data_used;
00763         if (delay < data_used)
00764             delay = data_used;
00765 
00766         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
00767         if(s->show_audio==1){
00768             h= INT_MIN;
00769             for(i=0; i<1000; i+=channels){
00770                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
00771                 int a= s->sample_array[idx];
00772                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
00773                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
00774                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
00775                 int score= a-d;
00776                 if(h<score && (b^c)<0){
00777                     h= score;
00778                     i_start= idx;
00779                 }
00780             }
00781         }
00782 
00783         s->last_i_start = i_start;
00784     } else {
00785         i_start = s->last_i_start;
00786     }
00787 
00788     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
00789     if(s->show_audio==1){
00790         fill_rectangle(screen,
00791                        s->xleft, s->ytop, s->width, s->height,
00792                        bgcolor);
00793 
00794         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
00795 
00796         /* total height for one channel */
00797         h = s->height / nb_display_channels;
00798         /* graph height / 2 */
00799         h2 = (h * 9) / 20;
00800         for(ch = 0;ch < nb_display_channels; ch++) {
00801             i = i_start + ch;
00802             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
00803             for(x = 0; x < s->width; x++) {
00804                 y = (s->sample_array[i] * h2) >> 15;
00805                 if (y < 0) {
00806                     y = -y;
00807                     ys = y1 - y;
00808                 } else {
00809                     ys = y1;
00810                 }
00811                 fill_rectangle(screen,
00812                                s->xleft + x, ys, 1, y,
00813                                fgcolor);
00814                 i += channels;
00815                 if (i >= SAMPLE_ARRAY_SIZE)
00816                     i -= SAMPLE_ARRAY_SIZE;
00817             }
00818         }
00819 
00820         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
00821 
00822         for(ch = 1;ch < nb_display_channels; ch++) {
00823             y = s->ytop + ch * h;
00824             fill_rectangle(screen,
00825                            s->xleft, y, s->width, 1,
00826                            fgcolor);
00827         }
00828         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
00829     }else{
00830         nb_display_channels= FFMIN(nb_display_channels, 2);
00831         if(rdft_bits != s->rdft_bits){
00832             av_rdft_end(s->rdft);
00833             av_free(s->rdft_data);
00834             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
00835             s->rdft_bits= rdft_bits;
00836             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
00837         }
00838         {
00839             FFTSample *data[2];
00840             for(ch = 0;ch < nb_display_channels; ch++) {
00841                 data[ch] = s->rdft_data + 2*nb_freq*ch;
00842                 i = i_start + ch;
00843                 for(x = 0; x < 2*nb_freq; x++) {
00844                     double w= (x-nb_freq)*(1.0/nb_freq);
00845                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
00846                     i += channels;
00847                     if (i >= SAMPLE_ARRAY_SIZE)
00848                         i -= SAMPLE_ARRAY_SIZE;
00849                 }
00850                 av_rdft_calc(s->rdft, data[ch]);
00851             }
00852             //least efficient way to do this, we should of course directly access it but its more than fast enough
00853             for(y=0; y<s->height; y++){
00854                 double w= 1/sqrt(nb_freq);
00855                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
00856                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
00857                        + data[1][2*y+1]*data[1][2*y+1])) : a;
00858                 a= FFMIN(a,255);
00859                 b= FFMIN(b,255);
00860                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
00861 
00862                 fill_rectangle(screen,
00863                             s->xpos, s->height-y, 1, 1,
00864                             fgcolor);
00865             }
00866         }
00867         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
00868         s->xpos++;
00869         if(s->xpos >= s->width)
00870             s->xpos= s->xleft;
00871     }
00872 }
00873 
/* (Re)create the SDL video surface.  Size preference: fullscreen
   resolution > user-requested screen_width/height > filter-chain (or
   codec) dimensions > 640x480.  Returns 0 on success (including when
   the existing surface already matches), -1 if SDL fails. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the current surface already has the wanted size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    /* record the size SDL actually gave us */
    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
00923 
00924 /* display the current picture, if any */
00925 static void video_display(VideoState *is)
00926 {
00927     if(!screen)
00928         video_open(cur_stream);
00929     if (is->audio_st && is->show_audio)
00930         video_audio_display(is);
00931     else if (is->video_st)
00932         video_image_display(is);
00933 }
00934 
/* Background thread that periodically posts FF_REFRESH_EVENT to the SDL
 * event loop to drive display updates. is->refresh acts as a "one event
 * in flight" flag so the queue is not flooded; the consumer clears it.
 * Runs until is->abort_request is set. */
static int refresh_thread(void *opaque)
{
    VideoState *is= opaque;
    while(!is->abort_request){
        SDL_Event event;
        event.type = FF_REFRESH_EVENT;
        event.user.data1 = opaque;
        if(!is->refresh){
            is->refresh=1;
            SDL_PushEvent(&event);
        }
        /* faster refresh while drawing the RDFT audio visualization */
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
    }
    return 0;
}
00950 
00951 /* get the current audio clock value */
00952 static double get_audio_clock(VideoState *is)
00953 {
00954     double pts;
00955     int hw_buf_size, bytes_per_sec;
00956     pts = is->audio_clock;
00957     hw_buf_size = audio_write_get_buf_size(is);
00958     bytes_per_sec = 0;
00959     if (is->audio_st) {
00960         bytes_per_sec = is->audio_st->codec->sample_rate *
00961             2 * is->audio_st->codec->channels;
00962     }
00963     if (bytes_per_sec)
00964         pts -= (double)hw_buf_size / bytes_per_sec;
00965     return pts;
00966 }
00967 
00968 /* get the current video clock value */
00969 static double get_video_clock(VideoState *is)
00970 {
00971     if (is->paused) {
00972         return is->video_current_pts;
00973     } else {
00974         return is->video_current_pts_drift + av_gettime() / 1000000.0;
00975     }
00976 }
00977 
00978 /* get the current external clock value */
00979 static double get_external_clock(VideoState *is)
00980 {
00981     int64_t ti;
00982     ti = av_gettime();
00983     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
00984 }
00985 
00986 /* get the current master clock value */
00987 static double get_master_clock(VideoState *is)
00988 {
00989     double val;
00990 
00991     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
00992         if (is->video_st)
00993             val = get_video_clock(is);
00994         else
00995             val = get_audio_clock(is);
00996     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
00997         if (is->audio_st)
00998             val = get_audio_clock(is);
00999         else
01000             val = get_video_clock(is);
01001     } else {
01002         val = get_external_clock(is);
01003     }
01004     return val;
01005 }
01006 
01007 /* seek in the stream */
01008 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
01009 {
01010     if (!is->seek_req) {
01011         is->seek_pos = pos;
01012         is->seek_rel = rel;
01013         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
01014         if (seek_by_bytes)
01015             is->seek_flags |= AVSEEK_FLAG_BYTE;
01016         is->seek_req = 1;
01017     }
01018 }
01019 
/* Toggle pause/resume. When resuming, the frame timer and video clock are
 * rebased onto the current wall clock so that the pause duration does not
 * count as elapsed playback time. */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* shift the frame timer forward by the time spent paused */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        /* only rebase the pts if the demuxer actually supported pausing */
        if(is->read_pause_return != AVERROR(ENOSYS)){
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
01032 
01033 static double compute_target_time(double frame_current_pts, VideoState *is)
01034 {
01035     double delay, sync_threshold, diff;
01036 
01037     /* compute nominal delay */
01038     delay = frame_current_pts - is->frame_last_pts;
01039     if (delay <= 0 || delay >= 10.0) {
01040         /* if incorrect delay, use previous one */
01041         delay = is->frame_last_delay;
01042     } else {
01043         is->frame_last_delay = delay;
01044     }
01045     is->frame_last_pts = frame_current_pts;
01046 
01047     /* update delay to follow master synchronisation source */
01048     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
01049          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
01050         /* if video is slave, we try to correct big delays by
01051            duplicating or deleting a frame */
01052         diff = get_video_clock(is) - get_master_clock(is);
01053 
01054         /* skip or repeat frame. We take into account the
01055            delay to compute the threshold. I still don't know
01056            if it is the best guess */
01057         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
01058         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
01059             if (diff <= -sync_threshold)
01060                 delay = 0;
01061             else if (diff >= sync_threshold)
01062                 delay = 2 * delay;
01063         }
01064     }
01065     is->frame_timer += delay;
01066 
01067     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
01068             delay, frame_current_pts, -diff);
01069 
01070     return is->frame_timer;
01071 }
01072 
/* Called from the event loop on each FF_REFRESH_EVENT to display the next
 * frame. Handles: dequeuing pictures whose target time has arrived,
 * frame dropping when we are late, subtitle queue maintenance, the
 * audio-only visualization path, and the periodic status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* too early: keep the picture queued for a later refresh */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* estimate when the following picture is due, to decide
             * whether this one is already obsolete */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* we are late: raise the skip ratio and possibly drop this
             * picture entirely, retrying with the next one */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* stream switch: flush every queued subtitle */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the subtitle once it has expired, or once the
                         * next one is already due */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            if (!display_disable)
                video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        if (!display_disable)
            video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* print the status line at most every 30ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
01213 
/* Shut down and free a VideoState: signal the worker threads to stop, join
 * them, release every queued picture and the synchronization primitives,
 * then free the state itself. */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_buffer(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    /* without avfilter, scaling is done through a cached sws context */
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
01247 
/* Tear down the player and exit the process: close the current stream
 * first (joins threads), then release option/filter state, quit SDL and
 * terminate. Never returns. */
static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    uninit_opts();
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    if (show_status)
        printf("\n");
    SDL_Quit();
    /* flush any pending log output */
    av_log(NULL, AV_LOG_QUIET, "");
    exit(0);
}
01264 
/* Allocate (or reallocate) the SDL YUV overlay for the picture currently at
 * the queue write index. Must run in the main thread (SDL requirement /
 * locking); the decoder thread triggers it via FF_ALLOC_EVENT and waits on
 * pictq_cond until vp->allocated is set. Exits the program if the video
 * system cannot provide an overlay of the required size. */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_buffer(vp->picref);
    vp->picref = NULL;

    /* with avfilter, the display size comes from the sink's input link */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
        /* SDL allocates a buffer smaller than requested if the video
         * overlay hardware is unable to support the requested size. */
        fprintf(stderr, "Error: the video system does not support an image\n"
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
                        "to reduce the image size.\n", vp->width, vp->height );
        do_exit();
    }

    /* wake the decoder thread waiting in queue_picture() */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
01308 
/* Copy a decoded frame into the next free slot of the picture queue.
 * Blocks while the queue is full; asks the main thread (via FF_ALLOC_EVENT)
 * to (re)allocate the SDL overlay when its size no longer matches.
 * Returns 0 on success, -1 if the video queue was aborted. */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full while no refresh pending: decay the skip ratio */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_buffer(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL's YV12 stores V before U, hence the 1<->2 plane swap */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* no filter graph: convert/copy with a cached swscale context */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
01430 
01435 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
01436 {
01437     double frame_delay, pts;
01438 
01439     pts = pts1;
01440 
01441     if (pts != 0) {
01442         /* update video clock with pts, if present */
01443         is->video_clock = pts;
01444     } else {
01445         pts = is->video_clock;
01446     }
01447     /* update video clock for next frame */
01448     frame_delay = av_q2d(is->video_st->codec->time_base);
01449     /* for MPEG2, the frame can be repeated, so we update the
01450        clock accordingly */
01451     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
01452     is->video_clock += frame_delay;
01453 
01454     return queue_picture(is, src_frame, pts, pos);
01455 }
01456 
01457 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
01458 {
01459     int len1, got_picture, i;
01460 
01461     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
01462         return -1;
01463 
01464     if (pkt->data == flush_pkt.data) {
01465         avcodec_flush_buffers(is->video_st->codec);
01466 
01467         SDL_LockMutex(is->pictq_mutex);
01468         //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
01469         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
01470             is->pictq[i].target_clock= 0;
01471         }
01472         while (is->pictq_size && !is->videoq.abort_request) {
01473             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
01474         }
01475         is->video_current_pos = -1;
01476         SDL_UnlockMutex(is->pictq_mutex);
01477 
01478         init_pts_correction(&is->pts_ctx);
01479         is->frame_last_pts = AV_NOPTS_VALUE;
01480         is->frame_last_delay = 0;
01481         is->frame_timer = (double)av_gettime() / 1000000.0;
01482         is->skip_frames = 1;
01483         is->skip_frames_index = 0;
01484         return 0;
01485     }
01486 
01487     len1 = avcodec_decode_video2(is->video_st->codec,
01488                                  frame, &got_picture,
01489                                  pkt);
01490 
01491     if (got_picture) {
01492         if (decoder_reorder_pts == -1) {
01493             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
01494         } else if (decoder_reorder_pts) {
01495             *pts = frame->pkt_pts;
01496         } else {
01497             *pts = frame->pkt_dts;
01498         }
01499 
01500         if (*pts == AV_NOPTS_VALUE) {
01501             *pts = 0;
01502         }
01503 
01504         is->skip_frames_index += 1;
01505         if(is->skip_frames_index >= is->skip_frames){
01506             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
01507             return 1;
01508         }
01509 
01510     }
01511     return 0;
01512 }
01513 
01514 #if CONFIG_AVFILTER
/* Private context of the "ffplay_input" source filter: feeds decoded video
 * frames into the filter graph. */
typedef struct {
    VideoState *is;   /* owning player state */
    AVFrame *frame;   /* reusable frame filled by get_video_frame() */
    int use_dr1;      /* nonzero when DR1 get/release_buffer callbacks are installed */
} FilterPriv;
01520 
/* get_buffer callback for DR1: lets the decoder render directly into a
 * buffer obtained from the filter graph. The buffer is padded by the codec
 * edge width on all sides; ref->data[] is advanced past the edges so the
 * decoder writes into the visible area. Returns 0 on success, -1 if no
 * filter buffer could be obtained. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;
    int pixel_size;

    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
        perms |= AV_PERM_NEG_LINESIZES;

    /* translate the decoder's buffer hints into filter buffer permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    /* grow the buffer by one edge on each side */
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        /* chroma planes (1 and 2) are subsampled; shift the edge offsets */
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            /* skip the top and left edges */
            ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
01571 
/* release_buffer callback for DR1: the frame's planes point into the
 * AVFilterBufferRef stored in pic->opaque, so clear the plane pointers and
 * drop the filter buffer reference. */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}
01577 
/* reget_buffer callback for DR1: reuse the existing filter buffer when the
 * frame still has one and the picture properties have not changed;
 * otherwise fall back to a fresh (readable) get_buffer. Returns 0 on
 * success, -1 if the properties changed. */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    if (pic->data[0] == NULL) {
        /* no buffer yet: allocate one that the decoder may also read */
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
        (codec->pix_fmt != ref->format)) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
01598 
/* init callback of the "ffplay_input" filter. opaque must be the
 * VideoState. Installs the DR1 buffer callbacks on the video decoder when
 * it supports direct rendering, and allocates the reusable decode frame.
 * Returns 0 on success, -1 if no VideoState was supplied. */
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;
    AVCodecContext *codec;
    if(!opaque) return -1;

    priv->is = opaque;
    codec    = priv->is->video_st->codec;
    /* let the buffer callbacks find this filter context again */
    codec->opaque = ctx;
    if(codec->codec->capabilities & CODEC_CAP_DR1) {
        priv->use_dr1 = 1;
        codec->get_buffer     = input_get_buffer;
        codec->release_buffer = input_release_buffer;
        codec->reget_buffer   = input_reget_buffer;
        codec->thread_safe_callbacks = 1;
    }

    priv->frame = avcodec_alloc_frame();

    return 0;
}
01620 
01621 static void input_uninit(AVFilterContext *ctx)
01622 {
01623     FilterPriv *priv = ctx->priv;
01624     av_free(priv->frame);
01625 }
01626 
/* request_frame callback: decode until a displayable picture is available,
 * wrap it in an AVFilterBufferRef (reusing the DR1 buffer when possible,
 * copying otherwise) and push it down the link. Returns 0 on success, -1
 * if decoding failed or the queue was aborted. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* ret == 0 means "no picture yet" (e.g. flush or skipped frame) */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* decoder already wrote into a filter buffer: just re-reference it */
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_image_copy(picref->data, picref->linesize,
                      priv->frame->data, priv->frame->linesize,
                      picref->format, link->w, link->h);
    }
    /* frees pkt.data only; pkt.pos below stays valid */
    av_free_packet(&pkt);

    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
01659 
/* query_formats callback: the source filter can only output the decoder's
 * own pixel format. */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat pix_fmts[] = {
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
    };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
01670 
01671 static int input_config_props(AVFilterLink *link)
01672 {
01673     FilterPriv *priv  = link->src->priv;
01674     AVCodecContext *c = priv->is->video_st->codec;
01675 
01676     link->w = c->width;
01677     link->h = c->height;
01678     link->time_base = priv->is->video_st->time_base;
01679 
01680     return 0;
01681 }
01682 
/* Source filter that injects frames decoded by ffplay into the graph:
 * no inputs, one video output driven by input_request_frame(). */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
01701 
01702 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
01703 {
01704     char sws_flags_str[128];
01705     int ret;
01706     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
01707     AVFilterContext *filt_src = NULL, *filt_out = NULL;
01708     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
01709     graph->scale_sws_opts = av_strdup(sws_flags_str);
01710 
01711     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
01712                                             NULL, is, graph)) < 0)
01713         goto the_end;
01714     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
01715                                             NULL, &ffsink_ctx, graph)) < 0)
01716         goto the_end;
01717 
01718     if(vfilters) {
01719         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
01720         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
01721 
01722         outputs->name    = av_strdup("in");
01723         outputs->filter_ctx = filt_src;
01724         outputs->pad_idx = 0;
01725         outputs->next    = NULL;
01726 
01727         inputs->name    = av_strdup("out");
01728         inputs->filter_ctx = filt_out;
01729         inputs->pad_idx = 0;
01730         inputs->next    = NULL;
01731 
01732         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
01733             goto the_end;
01734         av_freep(&vfilters);
01735     } else {
01736         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
01737             goto the_end;
01738     }
01739 
01740     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
01741         goto the_end;
01742 
01743     is->out_video_filter = filt_out;
01744 the_end:
01745     return ret;
01746 }
01747 
01748 #endif  /* CONFIG_AVFILTER */
01749 
/* Video decoding thread: pulls frames (through the filter graph when
 * avfilter is enabled, directly from the packet queue otherwise), converts
 * their pts to seconds and pushes them onto the picture queue. Exits when
 * frame retrieval or queuing reports an error/abort. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL;
    int64_t pos;

    if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
        goto the_end;
    filt_out = is->out_video_filter;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#else
        AVFilterBufferRef *picref;
        AVRational tb;
#endif
        /* busy-wait (coarsely) while playback is paused */
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
        if (picref) {
            pts_int = picref->pts;
            pos     = picref->pos;
            frame->opaque = picref;
        }

        /* rescale the pts when the filter graph uses a different time base */
        if (av_cmp_q(tb, is->video_st->time_base)) {
            av_unused int64_t pts1 = pts_int;
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
            av_dlog(NULL, "video_thread(): "
                    "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
                    tb.num, tb.den, pts1,
                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
        }
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0: no picture this round (flush/skip); try again */
        if (!ret)
            continue;

        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* single-step mode: pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_free(frame);
    return 0;
}
01824 
01825 static int subtitle_thread(void *arg)
01826 {
01827     VideoState *is = arg;
01828     SubPicture *sp;
01829     AVPacket pkt1, *pkt = &pkt1;
01830     int len1, got_subtitle;
01831     double pts;
01832     int i, j;
01833     int r, g, b, y, u, v, a;
01834 
01835     for(;;) {
01836         while (is->paused && !is->subtitleq.abort_request) {
01837             SDL_Delay(10);
01838         }
01839         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
01840             break;
01841 
01842         if(pkt->data == flush_pkt.data){
01843             avcodec_flush_buffers(is->subtitle_st->codec);
01844             continue;
01845         }
01846         SDL_LockMutex(is->subpq_mutex);
01847         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
01848                !is->subtitleq.abort_request) {
01849             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
01850         }
01851         SDL_UnlockMutex(is->subpq_mutex);
01852 
01853         if (is->subtitleq.abort_request)
01854             goto the_end;
01855 
01856         sp = &is->subpq[is->subpq_windex];
01857 
01858        /* NOTE: ipts is the PTS of the _first_ picture beginning in
01859            this packet, if any */
01860         pts = 0;
01861         if (pkt->pts != AV_NOPTS_VALUE)
01862             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
01863 
01864         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
01865                                     &sp->sub, &got_subtitle,
01866                                     pkt);
01867         if (got_subtitle && sp->sub.format == 0) {
01868             sp->pts = pts;
01869 
01870             for (i = 0; i < sp->sub.num_rects; i++)
01871             {
01872                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
01873                 {
01874                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
01875                     y = RGB_TO_Y_CCIR(r, g, b);
01876                     u = RGB_TO_U_CCIR(r, g, b, 0);
01877                     v = RGB_TO_V_CCIR(r, g, b, 0);
01878                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
01879                 }
01880             }
01881 
01882             /* now we can update the picture count */
01883             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
01884                 is->subpq_windex = 0;
01885             SDL_LockMutex(is->subpq_mutex);
01886             is->subpq_size++;
01887             SDL_UnlockMutex(is->subpq_mutex);
01888         }
01889         av_free_packet(pkt);
01890     }
01891  the_end:
01892     return 0;
01893 }
01894 
01895 /* copy samples for viewing in editor window */
01896 static void update_sample_display(VideoState *is, short *samples, int samples_size)
01897 {
01898     int size, len, channels;
01899 
01900     channels = is->audio_st->codec->channels;
01901 
01902     size = samples_size / sizeof(short);
01903     while (size > 0) {
01904         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
01905         if (len > size)
01906             len = size;
01907         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
01908         samples += len;
01909         is->sample_array_index += len;
01910         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
01911             is->sample_array_index = 0;
01912         size -= len;
01913     }
01914 }
01915 
01916 /* return the new audio buffer size (samples can be added or deleted
01917    to get better sync if video or external master clock) */
01918 static int synchronize_audio(VideoState *is, short *samples,
01919                              int samples_size1, double pts)
01920 {
01921     int n, samples_size;
01922     double ref_clock;
01923 
01924     n = 2 * is->audio_st->codec->channels;
01925     samples_size = samples_size1;
01926 
01927     /* if not master, then we try to remove or add samples to correct the clock */
01928     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
01929          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
01930         double diff, avg_diff;
01931         int wanted_size, min_size, max_size, nb_samples;
01932 
01933         ref_clock = get_master_clock(is);
01934         diff = get_audio_clock(is) - ref_clock;
01935 
01936         if (diff < AV_NOSYNC_THRESHOLD) {
01937             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
01938             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
01939                 /* not enough measures to have a correct estimate */
01940                 is->audio_diff_avg_count++;
01941             } else {
01942                 /* estimate the A-V difference */
01943                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
01944 
01945                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
01946                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
01947                     nb_samples = samples_size / n;
01948 
01949                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
01950                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
01951                     if (wanted_size < min_size)
01952                         wanted_size = min_size;
01953                     else if (wanted_size > max_size)
01954                         wanted_size = max_size;
01955 
01956                     /* add or remove samples to correction the synchro */
01957                     if (wanted_size < samples_size) {
01958                         /* remove samples */
01959                         samples_size = wanted_size;
01960                     } else if (wanted_size > samples_size) {
01961                         uint8_t *samples_end, *q;
01962                         int nb;
01963 
01964                         /* add samples */
01965                         nb = (samples_size - wanted_size);
01966                         samples_end = (uint8_t *)samples + samples_size - n;
01967                         q = samples_end + n;
01968                         while (nb > 0) {
01969                             memcpy(q, samples_end, n);
01970                             q += n;
01971                             nb -= n;
01972                         }
01973                         samples_size = wanted_size;
01974                     }
01975                 }
01976                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
01977                         diff, avg_diff, samples_size - samples_size1,
01978                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
01979             }
01980         } else {
01981             /* too big difference : may be initial PTS errors, so
01982                reset A-V filter */
01983             is->audio_diff_avg_count = 0;
01984             is->audio_diff_cum = 0;
01985         }
01986     }
01987 
01988     return samples_size;
01989 }
01990 
/* decode one audio frame and returns its uncompressed size */
/* Pulls packets from the audio queue, decodes them into is->audio_buf1 and,
 * when the decoder output is not S16, converts into is->audio_buf2.  Points
 * is->audio_buf at whichever buffer holds the playable data, writes the
 * frame's pts (seconds) to *pts_ptr, advances is->audio_clock, and returns
 * the number of bytes decoded.  Returns -1 when paused or aborting. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;   /* cursor into the current packet */
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance the cursor past the bytes the decoder consumed */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the sample-format converter whenever the decoder's
               output format changes */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        av_get_sample_fmt_name(dec->sample_fmt),
                        av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* interleaved data: only plane 0 of the 6-entry arrays is used */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;      /* bytes per S16 sample frame */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#ifdef DEBUG
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet signals a seek: reset the decoder */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
02093 
02094 /* prepare a new audio buffer */
02095 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
02096 {
02097     VideoState *is = opaque;
02098     int audio_size, len1;
02099     double pts;
02100 
02101     audio_callback_time = av_gettime();
02102 
02103     while (len > 0) {
02104         if (is->audio_buf_index >= is->audio_buf_size) {
02105            audio_size = audio_decode_frame(is, &pts);
02106            if (audio_size < 0) {
02107                 /* if error, just output silence */
02108                is->audio_buf = is->audio_buf1;
02109                is->audio_buf_size = 1024;
02110                memset(is->audio_buf, 0, is->audio_buf_size);
02111            } else {
02112                if (is->show_audio)
02113                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
02114                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
02115                                               pts);
02116                is->audio_buf_size = audio_size;
02117            }
02118            is->audio_buf_index = 0;
02119         }
02120         len1 = is->audio_buf_size - is->audio_buf_index;
02121         if (len1 > len)
02122             len1 = len;
02123         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
02124         len -= len1;
02125         stream += len1;
02126         is->audio_buf_index += len1;
02127     }
02128 }
02129 
/* open a given stream. Return 0 if OK */
/* Finds and opens the decoder for stream stream_index of is->ic, applies
 * the global command-line decoding options, opens SDL audio output for
 * audio streams, and starts the matching decoding thread.  Returns 0 on
 * success, -1 on any failure. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* downmix to at most 2 channels for SDL output */
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    /* apply the global debugging/speed/robustness options from the CLI */
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avctx->thread_count= thread_count;

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= AV_SAMPLE_FMT_S16;
    }

    /* re-enable packet delivery for this stream and start its thread */
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
02227 
/* Shuts down the decoding machinery for one stream: aborts its packet
 * queue, wakes any thread blocked on a condition variable, joins the
 * decoding thread, releases per-stream resources and clears the
 * VideoState references.  The order matters: abort first, then signal,
 * then join. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL callback thread before tearing down the queue */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* stop packet delivery and close the codec */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    /* clear the VideoState references so the rest of the player knows
       the stream is gone */
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
02299 
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable; it is read by
   decode_interrupt_cb() to abort blocking avio operations */
static VideoState *global_video_state;
02303 
02304 static int decode_interrupt_cb(void)
02305 {
02306     return (global_video_state && global_video_state->abort_request);
02307 }
02308 
/* this thread gets the stream from the disk or the network */
/* Demuxer thread: opens the input, selects the best audio/video/subtitle
 * streams, spawns the per-stream decoding threads, then loops reading
 * packets into the per-stream queues while handling pause, seek requests
 * and end-of-file.  On exit it tears down every open stream. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic = NULL;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];      /* best stream index per media type */
    AVPacket pkt1, *pkt = &pkt1;
    int eof=0;
    int pkt_in_play_range = 0;
    AVDictionaryEntry *t;

    memset(st_index, -1, sizeof(st_index));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* allow blocking avio operations to be interrupted on abort */
    global_video_state = is;
    avio_set_interrupt_cb(decode_interrupt_cb);

    err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    /* any entry left in format_opts was not consumed by the demuxer */
    if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    /* Set AVCodecContext options so they will be seen by av_find_stream_info() */
    for (i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *dec = ic->streams[i]->codec;
        switch (dec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_AUDIO],
                             AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM,
                             NULL);
            break;
        case AVMEDIA_TYPE_VIDEO:
            set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_VIDEO],
                             AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM,
                             NULL);
            break;
        }
    }

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* auto-select byte seeking for formats with discontinuous timestamps */
    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* discard every stream; stream_component_open() re-enables the ones
       we actually play */
    for (i = 0; i < ic->nb_streams; i++)
        ic->streams[i]->discard = AVDISCARD_ALL;
    if (!video_disable)
        st_index[AVMEDIA_TYPE_VIDEO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                                wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    if (!audio_disable)
        st_index[AVMEDIA_TYPE_AUDIO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                                wanted_stream[AVMEDIA_TYPE_AUDIO],
                                st_index[AVMEDIA_TYPE_VIDEO],
                                NULL, 0);
    if (!video_disable)
        st_index[AVMEDIA_TYPE_SUBTITLE] =
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
                                wanted_stream[AVMEDIA_TYPE_SUBTITLE],
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                                 st_index[AVMEDIA_TYPE_AUDIO] :
                                 st_index[AVMEDIA_TYPE_VIDEO]),
                                NULL, 0);
    if (show_status) {
        av_dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    /* without video, display the audio waveform/spectrum instead */
    if(ret<0) {
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demuxing loop */
    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause/resume to the demuxer (matters for network input) */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* flush the queues and inject a flush packet so each
                   decoder resets its internal state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(eof) {
            /* send a null packet so the video decoder drains its
               delayed frames */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                /* all queues drained: loop from the start or exit */
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
                eof=1;
            if (ic->pb && ic->pb->error)
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    avio_set_interrupt_cb(NULL);

    /* notify the main loop so it can quit on fatal errors */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
02569 
02570 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
02571 {
02572     VideoState *is;
02573 
02574     is = av_mallocz(sizeof(VideoState));
02575     if (!is)
02576         return NULL;
02577     av_strlcpy(is->filename, filename, sizeof(is->filename));
02578     is->iformat = iformat;
02579     is->ytop = 0;
02580     is->xleft = 0;
02581 
02582     /* start video display */
02583     is->pictq_mutex = SDL_CreateMutex();
02584     is->pictq_cond = SDL_CreateCond();
02585 
02586     is->subpq_mutex = SDL_CreateMutex();
02587     is->subpq_cond = SDL_CreateCond();
02588 
02589     is->av_sync_type = av_sync_type;
02590     is->parse_tid = SDL_CreateThread(decode_thread, is);
02591     if (!is->parse_tid) {
02592         av_free(is);
02593         return NULL;
02594     }
02595     return is;
02596 }
02597 
02598 static void stream_cycle_channel(VideoState *is, int codec_type)
02599 {
02600     AVFormatContext *ic = is->ic;
02601     int start_index, stream_index;
02602     AVStream *st;
02603 
02604     if (codec_type == AVMEDIA_TYPE_VIDEO)
02605         start_index = is->video_stream;
02606     else if (codec_type == AVMEDIA_TYPE_AUDIO)
02607         start_index = is->audio_stream;
02608     else
02609         start_index = is->subtitle_stream;
02610     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
02611         return;
02612     stream_index = start_index;
02613     for(;;) {
02614         if (++stream_index >= is->ic->nb_streams)
02615         {
02616             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
02617             {
02618                 stream_index = -1;
02619                 goto the_end;
02620             } else
02621                 stream_index = 0;
02622         }
02623         if (stream_index == start_index)
02624             return;
02625         st = ic->streams[stream_index];
02626         if (st->codec->codec_type == codec_type) {
02627             /* check that parameters are OK */
02628             switch(codec_type) {
02629             case AVMEDIA_TYPE_AUDIO:
02630                 if (st->codec->sample_rate != 0 &&
02631                     st->codec->channels != 0)
02632                     goto the_end;
02633                 break;
02634             case AVMEDIA_TYPE_VIDEO:
02635             case AVMEDIA_TYPE_SUBTITLE:
02636                 goto the_end;
02637             default:
02638                 break;
02639             }
02640         }
02641     }
02642  the_end:
02643     stream_component_close(is, start_index);
02644     stream_component_open(is, stream_index);
02645 }
02646 
02647 
02648 static void toggle_full_screen(void)
02649 {
02650     is_full_screen = !is_full_screen;
02651     video_open(cur_stream);
02652 }
02653 
02654 static void toggle_pause(void)
02655 {
02656     if (cur_stream)
02657         stream_pause(cur_stream);
02658     step = 0;
02659 }
02660 
02661 static void step_to_next_frame(void)
02662 {
02663     if (cur_stream) {
02664         /* if the stream is paused unpause it, then step */
02665         if (cur_stream->paused)
02666             stream_pause(cur_stream);
02667     }
02668     step = 1;
02669 }
02670 
02671 static void toggle_audio_display(void)
02672 {
02673     if (cur_stream) {
02674         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
02675         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
02676         fill_rectangle(screen,
02677                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
02678                     bgcolor);
02679         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
02680     }
02681 }
02682 
/* handle an event sent by the GUI */
/* Main SDL event loop: dispatches keyboard, mouse, resize and the
 * application-defined FF_* events.  Runs forever; termination happens
 * inside do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seeks, in seconds of stream time */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* byte-based seek: derive the current byte
                         * position from the best available source
                         * (video pos, then audio packet pos, then
                         * the raw I/O position) */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = avio_tell(cur_stream->ic->pb);
                        /* convert the time increment to bytes using the
                         * bit rate, falling back to a fixed estimate */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        /* time-based seek relative to the master clock */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* intentional fallthrough: a click (when not exiting) is
             * handled by the same seek code as a drag below */
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                /* only drags with a button held down seek */
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                /* map the horizontal click position to a fraction of
                 * the file, by bytes or by duration */
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  avio_size(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by the video thread: (re)allocate the picture on
             * the main thread, which owns the SDL display */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
02829 
02830 static int opt_frame_size(const char *opt, const char *arg)
02831 {
02832     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
02833         fprintf(stderr, "Incorrect frame size\n");
02834         return AVERROR(EINVAL);
02835     }
02836     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
02837         fprintf(stderr, "Frame size must be a multiple of 2\n");
02838         return AVERROR(EINVAL);
02839     }
02840     return 0;
02841 }
02842 
02843 static int opt_width(const char *opt, const char *arg)
02844 {
02845     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
02846     return 0;
02847 }
02848 
02849 static int opt_height(const char *opt, const char *arg)
02850 {
02851     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
02852     return 0;
02853 }
02854 
02855 static int opt_format(const char *opt, const char *arg)
02856 {
02857     file_iformat = av_find_input_format(arg);
02858     if (!file_iformat) {
02859         fprintf(stderr, "Unknown input format: %s\n", arg);
02860         return AVERROR(EINVAL);
02861     }
02862     return 0;
02863 }
02864 
02865 static int opt_frame_pix_fmt(const char *opt, const char *arg)
02866 {
02867     frame_pix_fmt = av_get_pix_fmt(arg);
02868     return 0;
02869 }
02870 
02871 static int opt_sync(const char *opt, const char *arg)
02872 {
02873     if (!strcmp(arg, "audio"))
02874         av_sync_type = AV_SYNC_AUDIO_MASTER;
02875     else if (!strcmp(arg, "video"))
02876         av_sync_type = AV_SYNC_VIDEO_MASTER;
02877     else if (!strcmp(arg, "ext"))
02878         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
02879     else {
02880         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
02881         exit(1);
02882     }
02883     return 0;
02884 }
02885 
02886 static int opt_seek(const char *opt, const char *arg)
02887 {
02888     start_time = parse_time_or_die(opt, arg, 1);
02889     return 0;
02890 }
02891 
02892 static int opt_duration(const char *opt, const char *arg)
02893 {
02894     duration = parse_time_or_die(opt, arg, 1);
02895     return 0;
02896 }
02897 
02898 static int opt_debug(const char *opt, const char *arg)
02899 {
02900     av_log_set_level(99);
02901     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
02902     return 0;
02903 }
02904 
02905 static int opt_vismv(const char *opt, const char *arg)
02906 {
02907     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
02908     return 0;
02909 }
02910 
02911 static int opt_thread_count(const char *opt, const char *arg)
02912 {
02913     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
02914 #if !HAVE_THREADS
02915     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
02916 #endif
02917     return 0;
02918 }
02919 
/* Command-line option table consumed by parse_options() in cmdutils.
 * Entries either point at a handler function or directly at the
 * global variable they set. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    /* display geometry / toggles */
    { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    /* stream enable/select */
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    /* playback range / seeking */
    { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    /* debugging / decoder tuning */
    { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    /* synchronization / threading / lifecycle */
    { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { "i", 0, {NULL}, "ffmpeg compatibility dummy option", ""},
    { NULL, },
};
02967 
/* Print a one-line description and the usage synopsis to stdout. */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
02974 
/* Print the full help text: usage, option tables, per-library AVOption
 * listings and the interactive key bindings. */
static void show_help(void)
{
    av_log_set_callback(log_callback_help);
    show_usage();
    /* main (non-expert) options first, then the expert ones */
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\n");
    av_opt_show2(avcodec_opts[0], NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
    printf("\n");
    av_opt_show2(avformat_opts, NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
#if !CONFIG_AVFILTER
    /* without avfilter, scaling options come from swscale directly */
    printf("\n");
    av_opt_show2(sws_opts, NULL,
                 AV_OPT_FLAG_ENCODING_PARAM, 0);
#endif
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
03008 
03009 static void opt_input_file(const char *filename)
03010 {
03011     if (input_filename) {
03012         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
03013                 filename, input_filename);
03014         exit(1);
03015     }
03016     if (!strcmp(filename, "-"))
03017         filename = "pipe:";
03018     input_filename = filename;
03019 }
03020 
03021 /* Called from the main */
03022 int main(int argc, char **argv)
03023 {
03024     int flags;
03025 
03026     av_log_set_flags(AV_LOG_SKIP_REPEATED);
03027 
03028     /* register all codecs, demux and protocols */
03029     avcodec_register_all();
03030 #if CONFIG_AVDEVICE
03031     avdevice_register_all();
03032 #endif
03033 #if CONFIG_AVFILTER
03034     avfilter_register_all();
03035 #endif
03036     av_register_all();
03037 
03038     init_opts();
03039 
03040     show_banner();
03041 
03042     parse_options(argc, argv, options, opt_input_file);
03043 
03044     if (!input_filename) {
03045         show_usage();
03046         fprintf(stderr, "An input file must be specified\n");
03047         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
03048         exit(1);
03049     }
03050 
03051     if (display_disable) {
03052         video_disable = 1;
03053     }
03054     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
03055 #if !defined(__MINGW32__) && !defined(__APPLE__)
03056     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
03057 #endif
03058     if (SDL_Init (flags)) {
03059         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
03060         exit(1);
03061     }
03062 
03063     if (!display_disable) {
03064 #if HAVE_SDL_VIDEO_SIZE
03065         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
03066         fs_screen_width = vi->current_w;
03067         fs_screen_height = vi->current_h;
03068 #endif
03069     }
03070 
03071     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
03072     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
03073     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
03074 
03075     av_init_packet(&flush_pkt);
03076     flush_pkt.data= "FLUSH";
03077 
03078     cur_stream = stream_open(input_filename, file_iformat);
03079 
03080     event_loop();
03081 
03082     /* never returns */
03083 
03084     return 0;
03085 }