Libav 0.7.1: libavformat/utils.c
00001 /* 00002 * various utility functions for use within Libav 00003 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard 00004 * 00005 * This file is part of Libav. 00006 * 00007 * Libav is free software; you can redistribute it and/or 00008 * modify it under the terms of the GNU Lesser General Public 00009 * License as published by the Free Software Foundation; either 00010 * version 2.1 of the License, or (at your option) any later version. 00011 * 00012 * Libav is distributed in the hope that it will be useful, 00013 * but WITHOUT ANY WARRANTY; without even the implied warranty of 00014 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 00015 * Lesser General Public License for more details. 00016 * 00017 * You should have received a copy of the GNU Lesser General Public 00018 * License along with Libav; if not, write to the Free Software 00019 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 00020 */ 00021 00022 /* #define DEBUG */ 00023 00024 #include "avformat.h" 00025 #include "avio_internal.h" 00026 #include "internal.h" 00027 #include "libavcodec/internal.h" 00028 #include "libavutil/opt.h" 00029 #include "libavutil/dict.h" 00030 #include "libavutil/pixdesc.h" 00031 #include "metadata.h" 00032 #include "id3v2.h" 00033 #include "libavutil/avstring.h" 00034 #include "riff.h" 00035 #include "audiointerleave.h" 00036 #include "url.h" 00037 #include <sys/time.h> 00038 #include <time.h> 00039 #include <strings.h> 00040 #include <stdarg.h> 00041 #if CONFIG_NETWORK 00042 #include "network.h" 00043 #endif 00044 00045 #undef NDEBUG 00046 #include <assert.h> 00047 00053 unsigned avformat_version(void) 00054 { 00055 return LIBAVFORMAT_VERSION_INT; 00056 } 00057 00058 const char *avformat_configuration(void) 00059 { 00060 return LIBAV_CONFIGURATION; 00061 } 00062 00063 const char *avformat_license(void) 00064 { 00065 #define LICENSE_PREFIX "libavformat license: " 00066 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1; 00067 } 00068 00069 /* fraction handling */ 00070 00081 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den) 00082 { 00083 num += (den >> 1); 00084 if (num >= den) { 00085 val += num / den; 00086 num = num % den; 00087 } 00088 f->val = val; 00089 f->num = num; 00090 f->den = den; 00091 } 00092 00099 static void av_frac_add(AVFrac *f, int64_t incr) 00100 { 00101 int64_t num, den; 00102 00103 num = f->num + incr; 00104 den = f->den; 00105 if (num < 0) { 00106 f->val += num / den; 00107 num = num % den; 00108 if (num < 0) { 00109 num += den; 00110 f->val--; 00111 } 00112 } else if (num >= den) { 00113 f->val += num / den; 00114 num = num % den; 00115 } 00116 f->num = num; 00117 } 00118 00120 static AVInputFormat *first_iformat = NULL; 00122 static AVOutputFormat *first_oformat = NULL; 00123 00124 AVInputFormat *av_iformat_next(AVInputFormat *f) 00125 { 00126 if(f) return f->next; 00127 else return first_iformat; 00128 } 00129 00130 AVOutputFormat *av_oformat_next(AVOutputFormat *f) 00131 { 00132 if(f) return f->next; 00133 else return first_oformat; 00134 } 00135 00136 void av_register_input_format(AVInputFormat *format) 00137 { 00138 AVInputFormat **p; 00139 p = &first_iformat; 00140 while (*p != NULL) p = &(*p)->next; 00141 *p = format; 00142 format->next = NULL; 00143 } 00144 00145 void av_register_output_format(AVOutputFormat *format) 00146 { 00147 AVOutputFormat **p; 00148 p = &first_oformat; 00149 while (*p != NULL) p = &(*p)->next; 00150 *p = format; 00151 format->next = NULL; 00152 } 00153 
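/*
 * Illustrative usage sketch (not taken from this file): once demuxers and
 * muxers have been registered through av_register_input_format() /
 * av_register_output_format() above -- normally via av_register_all() --
 * the two linked lists can be walked with av_iformat_next() and
 * av_oformat_next().  The helper name list_registered_formats() is made up
 * for this example.
 */
#include <stdio.h>
#include <libavformat/avformat.h>

static void list_registered_formats(void)
{
    AVInputFormat  *ifmt = NULL;
    AVOutputFormat *ofmt = NULL;

    av_register_all();                      /* register the built-in formats */

    while ((ifmt = av_iformat_next(ifmt)))  /* walk the first_iformat list */
        printf("demuxer: %s\n", ifmt->name);

    while ((ofmt = av_oformat_next(ofmt)))  /* walk the first_oformat list */
        printf("muxer:   %s\n", ofmt->name);
}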
00154 int av_match_ext(const char *filename, const char *extensions) 00155 { 00156 const char *ext, *p; 00157 char ext1[32], *q; 00158 00159 if(!filename) 00160 return 0; 00161 00162 ext = strrchr(filename, '.'); 00163 if (ext) { 00164 ext++; 00165 p = extensions; 00166 for(;;) { 00167 q = ext1; 00168 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1) 00169 *q++ = *p++; 00170 *q = '\0'; 00171 if (!strcasecmp(ext1, ext)) 00172 return 1; 00173 if (*p == '\0') 00174 break; 00175 p++; 00176 } 00177 } 00178 return 0; 00179 } 00180 00181 static int match_format(const char *name, const char *names) 00182 { 00183 const char *p; 00184 int len, namelen; 00185 00186 if (!name || !names) 00187 return 0; 00188 00189 namelen = strlen(name); 00190 while ((p = strchr(names, ','))) { 00191 len = FFMAX(p - names, namelen); 00192 if (!strncasecmp(name, names, len)) 00193 return 1; 00194 names = p+1; 00195 } 00196 return !strcasecmp(name, names); 00197 } 00198 00199 AVOutputFormat *av_guess_format(const char *short_name, const char *filename, 00200 const char *mime_type) 00201 { 00202 AVOutputFormat *fmt = NULL, *fmt_found; 00203 int score_max, score; 00204 00205 /* specific test for image sequences */ 00206 #if CONFIG_IMAGE2_MUXER 00207 if (!short_name && filename && 00208 av_filename_number_test(filename) && 00209 ff_guess_image2_codec(filename) != CODEC_ID_NONE) { 00210 return av_guess_format("image2", NULL, NULL); 00211 } 00212 #endif 00213 /* Find the proper file type. */ 00214 fmt_found = NULL; 00215 score_max = 0; 00216 while ((fmt = av_oformat_next(fmt))) { 00217 score = 0; 00218 if (fmt->name && short_name && !strcmp(fmt->name, short_name)) 00219 score += 100; 00220 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type)) 00221 score += 10; 00222 if (filename && fmt->extensions && 00223 av_match_ext(filename, fmt->extensions)) { 00224 score += 5; 00225 } 00226 if (score > score_max) { 00227 score_max = score; 00228 fmt_found = fmt; 00229 } 00230 } 00231 return fmt_found; 00232 } 00233 00234 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name, 00235 const char *filename, const char *mime_type, enum AVMediaType type){ 00236 if(type == AVMEDIA_TYPE_VIDEO){ 00237 enum CodecID codec_id= CODEC_ID_NONE; 00238 00239 #if CONFIG_IMAGE2_MUXER 00240 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){ 00241 codec_id= ff_guess_image2_codec(filename); 00242 } 00243 #endif 00244 if(codec_id == CODEC_ID_NONE) 00245 codec_id= fmt->video_codec; 00246 return codec_id; 00247 }else if(type == AVMEDIA_TYPE_AUDIO) 00248 return fmt->audio_codec; 00249 else if (type == AVMEDIA_TYPE_SUBTITLE) 00250 return fmt->subtitle_codec; 00251 else 00252 return CODEC_ID_NONE; 00253 } 00254 00255 AVInputFormat *av_find_input_format(const char *short_name) 00256 { 00257 AVInputFormat *fmt = NULL; 00258 while ((fmt = av_iformat_next(fmt))) { 00259 if (match_format(short_name, fmt->name)) 00260 return fmt; 00261 } 00262 return NULL; 00263 } 00264 00265 00266 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size) 00267 { 00268 int ret= av_new_packet(pkt, size); 00269 00270 if(ret<0) 00271 return ret; 00272 00273 pkt->pos= avio_tell(s); 00274 00275 ret= avio_read(s, pkt->data, size); 00276 if(ret<=0) 00277 av_free_packet(pkt); 00278 else 00279 av_shrink_packet(pkt, ret); 00280 00281 return ret; 00282 } 00283 00284 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size) 00285 { 00286 int ret; 00287 int old_size; 00288 if (!pkt->size) 00289 return av_get_packet(s, pkt, size); 
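    /* Appending to a non-empty packet: remember the old payload size, grow
     * the packet by the requested amount, read the new bytes right after the
     * existing data, and finally shrink back to old_size + FFMAX(ret, 0) so
     * that a short or failed read never leaves uninitialized bytes inside
     * the reported packet size. */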
00290 old_size = pkt->size; 00291 ret = av_grow_packet(pkt, size); 00292 if (ret < 0) 00293 return ret; 00294 ret = avio_read(s, pkt->data + old_size, size); 00295 av_shrink_packet(pkt, old_size + FFMAX(ret, 0)); 00296 return ret; 00297 } 00298 00299 00300 int av_filename_number_test(const char *filename) 00301 { 00302 char buf[1024]; 00303 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0); 00304 } 00305 00306 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max) 00307 { 00308 AVProbeData lpd = *pd; 00309 AVInputFormat *fmt1 = NULL, *fmt; 00310 int score, id3 = 0; 00311 00312 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) { 00313 int id3len = ff_id3v2_tag_len(lpd.buf); 00314 if (lpd.buf_size > id3len + 16) { 00315 lpd.buf += id3len; 00316 lpd.buf_size -= id3len; 00317 } 00318 id3 = 1; 00319 } 00320 00321 fmt = NULL; 00322 while ((fmt1 = av_iformat_next(fmt1))) { 00323 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE)) 00324 continue; 00325 score = 0; 00326 if (fmt1->read_probe) { 00327 score = fmt1->read_probe(&lpd); 00328 } else if (fmt1->extensions) { 00329 if (av_match_ext(lpd.filename, fmt1->extensions)) { 00330 score = 50; 00331 } 00332 } 00333 if (score > *score_max) { 00334 *score_max = score; 00335 fmt = fmt1; 00336 }else if (score == *score_max) 00337 fmt = NULL; 00338 } 00339 00340 /* a hack for files with huge id3v2 tags -- try to guess by file extension. */ 00341 if (!fmt && id3 && *score_max < AVPROBE_SCORE_MAX/4) { 00342 while ((fmt = av_iformat_next(fmt))) 00343 if (fmt->extensions && av_match_ext(lpd.filename, fmt->extensions)) { 00344 *score_max = AVPROBE_SCORE_MAX/4; 00345 break; 00346 } 00347 } 00348 00349 return fmt; 00350 } 00351 00352 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){ 00353 int score=0; 00354 return av_probe_input_format2(pd, is_opened, &score); 00355 } 00356 00357 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score) 00358 { 00359 static const struct { 00360 const char *name; enum CodecID id; enum AVMediaType type; 00361 } fmt_id_type[] = { 00362 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO }, 00363 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO }, 00364 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO }, 00365 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO }, 00366 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO }, 00367 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO }, 00368 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO }, 00369 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO }, 00370 { 0 } 00371 }; 00372 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score); 00373 00374 if (fmt) { 00375 int i; 00376 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n", 00377 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score); 00378 for (i = 0; fmt_id_type[i].name; i++) { 00379 if (!strcmp(fmt->name, fmt_id_type[i].name)) { 00380 st->codec->codec_id = fmt_id_type[i].id; 00381 st->codec->codec_type = fmt_id_type[i].type; 00382 break; 00383 } 00384 } 00385 } 00386 return !!fmt; 00387 } 00388 00389 /************************************************************/ 00390 /* input media file */ 00391 00392 #if FF_API_FORMAT_PARAMETERS 00393 static AVDictionary *convert_format_parameters(AVFormatParameters *ap) 00394 { 00395 char buf[1024]; 00396 AVDictionary *opts = NULL; 00397 00398 if (!ap) 00399 return NULL; 00400 00401 if (ap->time_base.num) { 00402 snprintf(buf, sizeof(buf), 
"%d/%d", ap->time_base.den, ap->time_base.num); 00403 av_dict_set(&opts, "framerate", buf, 0); 00404 } 00405 if (ap->sample_rate) { 00406 snprintf(buf, sizeof(buf), "%d", ap->sample_rate); 00407 av_dict_set(&opts, "sample_rate", buf, 0); 00408 } 00409 if (ap->channels) { 00410 snprintf(buf, sizeof(buf), "%d", ap->channels); 00411 av_dict_set(&opts, "channels", buf, 0); 00412 } 00413 if (ap->width || ap->height) { 00414 snprintf(buf, sizeof(buf), "%dx%d", ap->width, ap->height); 00415 av_dict_set(&opts, "video_size", buf, 0); 00416 } 00417 if (ap->pix_fmt != PIX_FMT_NONE) { 00418 av_dict_set(&opts, "pixel_format", av_get_pix_fmt_name(ap->pix_fmt), 0); 00419 } 00420 if (ap->channel) { 00421 snprintf(buf, sizeof(buf), "%d", ap->channel); 00422 av_dict_set(&opts, "channel", buf, 0); 00423 } 00424 if (ap->standard) { 00425 av_dict_set(&opts, "standard", ap->standard, 0); 00426 } 00427 if (ap->mpeg2ts_compute_pcr) { 00428 av_dict_set(&opts, "mpeg2ts_compute_pcr", "1", 0); 00429 } 00430 if (ap->initial_pause) { 00431 av_dict_set(&opts, "initial_pause", "1", 0); 00432 } 00433 return opts; 00434 } 00435 00439 int av_open_input_stream(AVFormatContext **ic_ptr, 00440 AVIOContext *pb, const char *filename, 00441 AVInputFormat *fmt, AVFormatParameters *ap) 00442 { 00443 int err; 00444 AVDictionary *opts; 00445 AVFormatContext *ic; 00446 AVFormatParameters default_ap; 00447 00448 if(!ap){ 00449 ap=&default_ap; 00450 memset(ap, 0, sizeof(default_ap)); 00451 } 00452 opts = convert_format_parameters(ap); 00453 00454 if(!ap->prealloced_context) 00455 ic = avformat_alloc_context(); 00456 else 00457 ic = *ic_ptr; 00458 if (!ic) { 00459 err = AVERROR(ENOMEM); 00460 goto fail; 00461 } 00462 if (pb && fmt && fmt->flags & AVFMT_NOFILE) 00463 av_log(ic, AV_LOG_WARNING, "Custom AVIOContext makes no sense and " 00464 "will be ignored with AVFMT_NOFILE format.\n"); 00465 else 00466 ic->pb = pb; 00467 00468 if ((err = avformat_open_input(&ic, filename, fmt, &opts)) < 0) 00469 goto fail; 00470 ic->pb = ic->pb ? ic->pb : pb; // don't leak custom pb if it wasn't set above 00471 00472 fail: 00473 *ic_ptr = ic; 00474 av_dict_free(&opts); 00475 return err; 00476 } 00477 #endif 00478 00480 #define PROBE_BUF_MIN 2048 00481 #define PROBE_BUF_MAX (1<<20) 00482 00483 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt, 00484 const char *filename, void *logctx, 00485 unsigned int offset, unsigned int max_probe_size) 00486 { 00487 AVProbeData pd = { filename ? filename : "", NULL, -offset }; 00488 unsigned char *buf = NULL; 00489 int ret = 0, probe_size; 00490 00491 if (!max_probe_size) { 00492 max_probe_size = PROBE_BUF_MAX; 00493 } else if (max_probe_size > PROBE_BUF_MAX) { 00494 max_probe_size = PROBE_BUF_MAX; 00495 } else if (max_probe_size < PROBE_BUF_MIN) { 00496 return AVERROR(EINVAL); 00497 } 00498 00499 if (offset >= max_probe_size) { 00500 return AVERROR(EINVAL); 00501 } 00502 00503 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt && ret >= 0; 00504 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) { 00505 int ret, score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0; 00506 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 
0 : probe_size>>1; 00507 00508 if (probe_size < offset) { 00509 continue; 00510 } 00511 00512 /* read probe data */ 00513 buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE); 00514 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) { 00515 /* fail if error was not end of file, otherwise, lower score */ 00516 if (ret != AVERROR_EOF) { 00517 av_free(buf); 00518 return ret; 00519 } 00520 score = 0; 00521 ret = 0; /* error was end of file, nothing read */ 00522 } 00523 pd.buf_size += ret; 00524 pd.buf = &buf[offset]; 00525 00526 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE); 00527 00528 /* guess file format */ 00529 *fmt = av_probe_input_format2(&pd, 1, &score); 00530 if(*fmt){ 00531 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration 00532 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score); 00533 }else 00534 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score); 00535 } 00536 } 00537 00538 if (!*fmt) { 00539 av_free(buf); 00540 return AVERROR_INVALIDDATA; 00541 } 00542 00543 /* rewind. reuse probe buffer to avoid seeking */ 00544 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0) 00545 av_free(buf); 00546 00547 return ret; 00548 } 00549 00550 #if FF_API_FORMAT_PARAMETERS 00551 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename, 00552 AVInputFormat *fmt, 00553 int buf_size, 00554 AVFormatParameters *ap) 00555 { 00556 int err; 00557 AVDictionary *opts = convert_format_parameters(ap); 00558 00559 if (!ap || !ap->prealloced_context) 00560 *ic_ptr = NULL; 00561 00562 err = avformat_open_input(ic_ptr, filename, fmt, &opts); 00563 00564 av_dict_free(&opts); 00565 return err; 00566 } 00567 #endif 00568 00569 /* open input file and probe the format if necessary */ 00570 static int init_input(AVFormatContext *s, const char *filename) 00571 { 00572 int ret; 00573 AVProbeData pd = {filename, NULL, 0}; 00574 00575 if (s->pb) { 00576 s->flags |= AVFMT_FLAG_CUSTOM_IO; 00577 if (!s->iformat) 00578 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0); 00579 else if (s->iformat->flags & AVFMT_NOFILE) 00580 return AVERROR(EINVAL); 00581 return 0; 00582 } 00583 00584 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) || 00585 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0)))) 00586 return 0; 00587 00588 if ((ret = avio_open(&s->pb, filename, AVIO_FLAG_READ)) < 0) 00589 return ret; 00590 if (s->iformat) 00591 return 0; 00592 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0); 00593 } 00594 00595 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options) 00596 { 00597 AVFormatContext *s = *ps; 00598 int ret = 0; 00599 AVFormatParameters ap = { 0 }; 00600 AVDictionary *tmp = NULL; 00601 00602 if (!s && !(s = avformat_alloc_context())) 00603 return AVERROR(ENOMEM); 00604 if (fmt) 00605 s->iformat = fmt; 00606 00607 if (options) 00608 av_dict_copy(&tmp, *options, 0); 00609 00610 if ((ret = av_opt_set_dict(s, &tmp)) < 0) 00611 goto fail; 00612 00613 if ((ret = init_input(s, filename)) < 0) 00614 goto fail; 00615 00616 /* check filename in case an image number is expected */ 00617 if (s->iformat->flags & AVFMT_NEEDNUMBER) { 00618 if (!av_filename_number_test(filename)) { 00619 ret = AVERROR(EINVAL); 00620 goto fail; 00621 } 00622 } 00623 00624 s->duration = s->start_time = AV_NOPTS_VALUE; 00625 av_strlcpy(s->filename, filename, 
sizeof(s->filename)); 00626 00627 /* allocate private data */ 00628 if (s->iformat->priv_data_size > 0) { 00629 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) { 00630 ret = AVERROR(ENOMEM); 00631 goto fail; 00632 } 00633 if (s->iformat->priv_class) { 00634 *(const AVClass**)s->priv_data = s->iformat->priv_class; 00635 av_opt_set_defaults(s->priv_data); 00636 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0) 00637 goto fail; 00638 } 00639 } 00640 00641 /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */ 00642 if (s->pb) 00643 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC); 00644 00645 if (s->iformat->read_header) 00646 if ((ret = s->iformat->read_header(s, &ap)) < 0) 00647 goto fail; 00648 00649 if (s->pb && !s->data_offset) 00650 s->data_offset = avio_tell(s->pb); 00651 00652 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE; 00653 00654 if (options) { 00655 av_dict_free(options); 00656 *options = tmp; 00657 } 00658 *ps = s; 00659 return 0; 00660 00661 fail: 00662 av_dict_free(&tmp); 00663 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO)) 00664 avio_close(s->pb); 00665 avformat_free_context(s); 00666 *ps = NULL; 00667 return ret; 00668 } 00669 00670 /*******************************************************/ 00671 00672 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt, 00673 AVPacketList **plast_pktl){ 00674 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList)); 00675 if (!pktl) 00676 return NULL; 00677 00678 if (*packet_buffer) 00679 (*plast_pktl)->next = pktl; 00680 else 00681 *packet_buffer = pktl; 00682 00683 /* add the packet in the buffered packet list */ 00684 *plast_pktl = pktl; 00685 pktl->pkt= *pkt; 00686 return &pktl->pkt; 00687 } 00688 00689 int av_read_packet(AVFormatContext *s, AVPacket *pkt) 00690 { 00691 int ret, i; 00692 AVStream *st; 00693 00694 for(;;){ 00695 AVPacketList *pktl = s->raw_packet_buffer; 00696 00697 if (pktl) { 00698 *pkt = pktl->pkt; 00699 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE || 00700 !s->streams[pkt->stream_index]->probe_packets || 00701 s->raw_packet_buffer_remaining_size < pkt->size){ 00702 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data; 00703 av_freep(&pd->buf); 00704 pd->buf_size = 0; 00705 s->raw_packet_buffer = pktl->next; 00706 s->raw_packet_buffer_remaining_size += pkt->size; 00707 av_free(pktl); 00708 return 0; 00709 } 00710 } 00711 00712 av_init_packet(pkt); 00713 ret= s->iformat->read_packet(s, pkt); 00714 if (ret < 0) { 00715 if (!pktl || ret == AVERROR(EAGAIN)) 00716 return ret; 00717 for (i = 0; i < s->nb_streams; i++) 00718 s->streams[i]->probe_packets = 0; 00719 continue; 00720 } 00721 st= s->streams[pkt->stream_index]; 00722 00723 switch(st->codec->codec_type){ 00724 case AVMEDIA_TYPE_VIDEO: 00725 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id; 00726 break; 00727 case AVMEDIA_TYPE_AUDIO: 00728 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id; 00729 break; 00730 case AVMEDIA_TYPE_SUBTITLE: 00731 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id; 00732 break; 00733 } 00734 00735 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE || 00736 !st->probe_packets)) 00737 return ret; 00738 00739 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end); 00740 s->raw_packet_buffer_remaining_size -= pkt->size; 00741 00742 if(st->codec->codec_id == CODEC_ID_PROBE){ 00743 AVProbeData *pd = &st->probe_data; 00744 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index); 00745 --st->probe_packets; 00746 
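            /* For streams still marked CODEC_ID_PROBE, the raw payload is
             * appended to the per-stream AVProbeData buffer below; whenever
             * the buffer size crosses a power-of-two boundary (the av_log2()
             * check), set_codec_from_probe_data() re-runs the format probe
             * to try to assign a real codec_id/codec_type to the stream. */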
00747 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE); 00748 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size); 00749 pd->buf_size += pkt->size; 00750 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE); 00751 00752 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){ 00753 //FIXME we dont reduce score to 0 for the case of running out of buffer space in bytes 00754 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0); 00755 if(st->codec->codec_id != CODEC_ID_PROBE){ 00756 pd->buf_size=0; 00757 av_freep(&pd->buf); 00758 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index); 00759 } 00760 } 00761 } 00762 } 00763 } 00764 00765 /**********************************************************/ 00766 00770 static int get_audio_frame_size(AVCodecContext *enc, int size) 00771 { 00772 int frame_size; 00773 00774 if(enc->codec_id == CODEC_ID_VORBIS) 00775 return -1; 00776 00777 if (enc->frame_size <= 1) { 00778 int bits_per_sample = av_get_bits_per_sample(enc->codec_id); 00779 00780 if (bits_per_sample) { 00781 if (enc->channels == 0) 00782 return -1; 00783 frame_size = (size << 3) / (bits_per_sample * enc->channels); 00784 } else { 00785 /* used for example by ADPCM codecs */ 00786 if (enc->bit_rate == 0) 00787 return -1; 00788 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate; 00789 } 00790 } else { 00791 frame_size = enc->frame_size; 00792 } 00793 return frame_size; 00794 } 00795 00796 00800 static void compute_frame_duration(int *pnum, int *pden, AVStream *st, 00801 AVCodecParserContext *pc, AVPacket *pkt) 00802 { 00803 int frame_size; 00804 00805 *pnum = 0; 00806 *pden = 0; 00807 switch(st->codec->codec_type) { 00808 case AVMEDIA_TYPE_VIDEO: 00809 if(st->time_base.num*1000LL > st->time_base.den){ 00810 *pnum = st->time_base.num; 00811 *pden = st->time_base.den; 00812 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){ 00813 *pnum = st->codec->time_base.num; 00814 *pden = st->codec->time_base.den; 00815 if (pc && pc->repeat_pict) { 00816 *pnum = (*pnum) * (1 + pc->repeat_pict); 00817 } 00818 //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet 00819 //Thus if we have no parser in such case leave duration undefined. 
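        //For example MPEG-2 has ticks_per_frame == 2 and may mix frame and
        //field pictures, so without a parser reporting repeat_pict the
        //per-packet duration is ambiguous and is left undefined below.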
00820 if(st->codec->ticks_per_frame>1 && !pc){ 00821 *pnum = *pden = 0; 00822 } 00823 } 00824 break; 00825 case AVMEDIA_TYPE_AUDIO: 00826 frame_size = get_audio_frame_size(st->codec, pkt->size); 00827 if (frame_size <= 0 || st->codec->sample_rate <= 0) 00828 break; 00829 *pnum = frame_size; 00830 *pden = st->codec->sample_rate; 00831 break; 00832 default: 00833 break; 00834 } 00835 } 00836 00837 static int is_intra_only(AVCodecContext *enc){ 00838 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){ 00839 return 1; 00840 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){ 00841 switch(enc->codec_id){ 00842 case CODEC_ID_MJPEG: 00843 case CODEC_ID_MJPEGB: 00844 case CODEC_ID_LJPEG: 00845 case CODEC_ID_RAWVIDEO: 00846 case CODEC_ID_DVVIDEO: 00847 case CODEC_ID_HUFFYUV: 00848 case CODEC_ID_FFVHUFF: 00849 case CODEC_ID_ASV1: 00850 case CODEC_ID_ASV2: 00851 case CODEC_ID_VCR1: 00852 case CODEC_ID_DNXHD: 00853 case CODEC_ID_JPEG2000: 00854 return 1; 00855 default: break; 00856 } 00857 } 00858 return 0; 00859 } 00860 00861 static void update_initial_timestamps(AVFormatContext *s, int stream_index, 00862 int64_t dts, int64_t pts) 00863 { 00864 AVStream *st= s->streams[stream_index]; 00865 AVPacketList *pktl= s->packet_buffer; 00866 00867 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE) 00868 return; 00869 00870 st->first_dts= dts - st->cur_dts; 00871 st->cur_dts= dts; 00872 00873 for(; pktl; pktl= pktl->next){ 00874 if(pktl->pkt.stream_index != stream_index) 00875 continue; 00876 //FIXME think more about this check 00877 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts) 00878 pktl->pkt.pts += st->first_dts; 00879 00880 if(pktl->pkt.dts != AV_NOPTS_VALUE) 00881 pktl->pkt.dts += st->first_dts; 00882 00883 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE) 00884 st->start_time= pktl->pkt.pts; 00885 } 00886 if (st->start_time == AV_NOPTS_VALUE) 00887 st->start_time = pts; 00888 } 00889 00890 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt) 00891 { 00892 AVPacketList *pktl= s->packet_buffer; 00893 int64_t cur_dts= 0; 00894 00895 if(st->first_dts != AV_NOPTS_VALUE){ 00896 cur_dts= st->first_dts; 00897 for(; pktl; pktl= pktl->next){ 00898 if(pktl->pkt.stream_index == pkt->stream_index){ 00899 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration) 00900 break; 00901 cur_dts -= pkt->duration; 00902 } 00903 } 00904 pktl= s->packet_buffer; 00905 st->first_dts = cur_dts; 00906 }else if(st->cur_dts) 00907 return; 00908 00909 for(; pktl; pktl= pktl->next){ 00910 if(pktl->pkt.stream_index != pkt->stream_index) 00911 continue; 00912 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE 00913 && !pktl->pkt.duration){ 00914 pktl->pkt.dts= cur_dts; 00915 if(!st->codec->has_b_frames) 00916 pktl->pkt.pts= cur_dts; 00917 cur_dts += pkt->duration; 00918 pktl->pkt.duration= pkt->duration; 00919 }else 00920 break; 00921 } 00922 if(st->first_dts == AV_NOPTS_VALUE) 00923 st->cur_dts= cur_dts; 00924 } 00925 00926 static void compute_pkt_fields(AVFormatContext *s, AVStream *st, 00927 AVCodecParserContext *pc, AVPacket *pkt) 00928 { 00929 int num, den, presentation_delayed, delay, i; 00930 int64_t offset; 00931 00932 if (s->flags & AVFMT_FLAG_NOFILLIN) 00933 return; 00934 00935 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE) 00936 pkt->dts= AV_NOPTS_VALUE; 00937 00938 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == 
AV_PICTURE_TYPE_B) 00939 //FIXME Set low_delay = 0 when has_b_frames = 1 00940 st->codec->has_b_frames = 1; 00941 00942 /* do we have a video B-frame ? */ 00943 delay= st->codec->has_b_frames; 00944 presentation_delayed = 0; 00945 00946 // ignore delay caused by frame threading so that the mpeg2-without-dts 00947 // warning will not trigger 00948 if (delay && st->codec->active_thread_type&FF_THREAD_FRAME) 00949 delay -= st->codec->thread_count-1; 00950 00951 /* XXX: need has_b_frame, but cannot get it if the codec is 00952 not initialized */ 00953 if (delay && 00954 pc && pc->pict_type != AV_PICTURE_TYPE_B) 00955 presentation_delayed = 1; 00956 00957 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63 00958 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){ 00959 pkt->dts -= 1LL<<st->pts_wrap_bits; 00960 } 00961 00962 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg) 00963 // we take the conservative approach and discard both 00964 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly. 00965 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){ 00966 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n"); 00967 pkt->dts= pkt->pts= AV_NOPTS_VALUE; 00968 } 00969 00970 if (pkt->duration == 0) { 00971 compute_frame_duration(&num, &den, st, pc, pkt); 00972 if (den && num) { 00973 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN); 00974 00975 if(pkt->duration != 0 && s->packet_buffer) 00976 update_initial_durations(s, st, pkt); 00977 } 00978 } 00979 00980 /* correct timestamps with byte offset if demuxers only have timestamps 00981 on packet boundaries */ 00982 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){ 00983 /* this will estimate bitrate based on this frame's duration and size */ 00984 offset = av_rescale(pc->offset, pkt->duration, pkt->size); 00985 if(pkt->pts != AV_NOPTS_VALUE) 00986 pkt->pts += offset; 00987 if(pkt->dts != AV_NOPTS_VALUE) 00988 pkt->dts += offset; 00989 } 00990 00991 if (pc && pc->dts_sync_point >= 0) { 00992 // we have synchronization info from the parser 00993 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num; 00994 if (den > 0) { 00995 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den; 00996 if (pkt->dts != AV_NOPTS_VALUE) { 00997 // got DTS from the stream, update reference timestamp 00998 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den; 00999 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den; 01000 } else if (st->reference_dts != AV_NOPTS_VALUE) { 01001 // compute DTS based on reference timestamp 01002 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den; 01003 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den; 01004 } 01005 if (pc->dts_sync_point > 0) 01006 st->reference_dts = pkt->dts; // new reference 01007 } 01008 } 01009 01010 /* This may be redundant, but it should not hurt. 
*/ 01011 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts) 01012 presentation_delayed = 1; 01013 01014 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc); 01015 /* interpolate PTS and DTS if they are not present */ 01016 //We skip H264 currently because delay and has_b_frames are not reliably set 01017 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){ 01018 if (presentation_delayed) { 01019 /* DTS = decompression timestamp */ 01020 /* PTS = presentation timestamp */ 01021 if (pkt->dts == AV_NOPTS_VALUE) 01022 pkt->dts = st->last_IP_pts; 01023 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); 01024 if (pkt->dts == AV_NOPTS_VALUE) 01025 pkt->dts = st->cur_dts; 01026 01027 /* this is tricky: the dts must be incremented by the duration 01028 of the frame we are displaying, i.e. the last I- or P-frame */ 01029 if (st->last_IP_duration == 0) 01030 st->last_IP_duration = pkt->duration; 01031 if(pkt->dts != AV_NOPTS_VALUE) 01032 st->cur_dts = pkt->dts + st->last_IP_duration; 01033 st->last_IP_duration = pkt->duration; 01034 st->last_IP_pts= pkt->pts; 01035 /* cannot compute PTS if not present (we can compute it only 01036 by knowing the future */ 01037 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){ 01038 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){ 01039 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts); 01040 int64_t new_diff= FFABS(st->cur_dts - pkt->pts); 01041 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){ 01042 pkt->pts += pkt->duration; 01043 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size); 01044 } 01045 } 01046 01047 /* presentation is not delayed : PTS and DTS are the same */ 01048 if(pkt->pts == AV_NOPTS_VALUE) 01049 pkt->pts = pkt->dts; 01050 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts); 01051 if(pkt->pts == AV_NOPTS_VALUE) 01052 pkt->pts = st->cur_dts; 01053 pkt->dts = pkt->pts; 01054 if(pkt->pts != AV_NOPTS_VALUE) 01055 st->cur_dts = pkt->pts + pkt->duration; 01056 } 01057 } 01058 01059 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){ 01060 st->pts_buffer[0]= pkt->pts; 01061 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++) 01062 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]); 01063 if(pkt->dts == AV_NOPTS_VALUE) 01064 pkt->dts= st->pts_buffer[0]; 01065 if(st->codec->codec_id == CODEC_ID_H264){ //we skiped it above so we try here 01066 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet 01067 } 01068 if(pkt->dts > st->cur_dts) 01069 st->cur_dts = pkt->dts; 01070 } 01071 01072 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts); 01073 01074 /* update flags */ 01075 if(is_intra_only(st->codec)) 01076 pkt->flags |= AV_PKT_FLAG_KEY; 01077 else if (pc) { 01078 pkt->flags = 0; 01079 /* keyframe computation */ 01080 if (pc->key_frame == 1) 01081 pkt->flags |= AV_PKT_FLAG_KEY; 01082 else if (pc->key_frame == -1 && pc->pict_type == AV_PICTURE_TYPE_I) 01083 pkt->flags |= AV_PKT_FLAG_KEY; 01084 } 01085 if (pc) 01086 pkt->convergence_duration = pc->convergence_duration; 
01087 } 01088 01089 01090 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt) 01091 { 01092 AVStream *st; 01093 int len, ret, i; 01094 01095 av_init_packet(pkt); 01096 01097 for(;;) { 01098 /* select current input stream component */ 01099 st = s->cur_st; 01100 if (st) { 01101 if (!st->need_parsing || !st->parser) { 01102 /* no parsing needed: we just output the packet as is */ 01103 /* raw data support */ 01104 *pkt = st->cur_pkt; st->cur_pkt.data= NULL; 01105 compute_pkt_fields(s, st, NULL, pkt); 01106 s->cur_st = NULL; 01107 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && 01108 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) { 01109 ff_reduce_index(s, st->index); 01110 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME); 01111 } 01112 break; 01113 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) { 01114 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size, 01115 st->cur_ptr, st->cur_len, 01116 st->cur_pkt.pts, st->cur_pkt.dts, 01117 st->cur_pkt.pos); 01118 st->cur_pkt.pts = AV_NOPTS_VALUE; 01119 st->cur_pkt.dts = AV_NOPTS_VALUE; 01120 /* increment read pointer */ 01121 st->cur_ptr += len; 01122 st->cur_len -= len; 01123 01124 /* return packet if any */ 01125 if (pkt->size) { 01126 got_packet: 01127 pkt->duration = 0; 01128 pkt->stream_index = st->index; 01129 pkt->pts = st->parser->pts; 01130 pkt->dts = st->parser->dts; 01131 pkt->pos = st->parser->pos; 01132 if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){ 01133 s->cur_st = NULL; 01134 pkt->destruct= st->cur_pkt.destruct; 01135 st->cur_pkt.destruct= NULL; 01136 st->cur_pkt.data = NULL; 01137 assert(st->cur_len == 0); 01138 }else{ 01139 pkt->destruct = NULL; 01140 } 01141 compute_pkt_fields(s, st, st->parser, pkt); 01142 01143 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){ 01144 ff_reduce_index(s, st->index); 01145 av_add_index_entry(st, st->parser->frame_offset, pkt->dts, 01146 0, 0, AVINDEX_KEYFRAME); 01147 } 01148 01149 break; 01150 } 01151 } else { 01152 /* free packet */ 01153 av_free_packet(&st->cur_pkt); 01154 s->cur_st = NULL; 01155 } 01156 } else { 01157 AVPacket cur_pkt; 01158 /* read next packet */ 01159 ret = av_read_packet(s, &cur_pkt); 01160 if (ret < 0) { 01161 if (ret == AVERROR(EAGAIN)) 01162 return ret; 01163 /* return the last frames, if any */ 01164 for(i = 0; i < s->nb_streams; i++) { 01165 st = s->streams[i]; 01166 if (st->parser && st->need_parsing) { 01167 av_parser_parse2(st->parser, st->codec, 01168 &pkt->data, &pkt->size, 01169 NULL, 0, 01170 AV_NOPTS_VALUE, AV_NOPTS_VALUE, 01171 AV_NOPTS_VALUE); 01172 if (pkt->size) 01173 goto got_packet; 01174 } 01175 } 01176 /* no more packets: really terminate parsing */ 01177 return ret; 01178 } 01179 st = s->streams[cur_pkt.stream_index]; 01180 st->cur_pkt= cur_pkt; 01181 01182 if(st->cur_pkt.pts != AV_NOPTS_VALUE && 01183 st->cur_pkt.dts != AV_NOPTS_VALUE && 01184 st->cur_pkt.pts < st->cur_pkt.dts){ 01185 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n", 01186 st->cur_pkt.stream_index, 01187 st->cur_pkt.pts, 01188 st->cur_pkt.dts, 01189 st->cur_pkt.size); 01190 // av_free_packet(&st->cur_pkt); 01191 // return -1; 01192 } 01193 01194 if(s->debug & FF_FDEBUG_TS) 01195 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n", 01196 st->cur_pkt.stream_index, 01197 st->cur_pkt.pts, 01198 st->cur_pkt.dts, 01199 st->cur_pkt.size, 01200 
st->cur_pkt.duration, 01201 st->cur_pkt.flags); 01202 01203 s->cur_st = st; 01204 st->cur_ptr = st->cur_pkt.data; 01205 st->cur_len = st->cur_pkt.size; 01206 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) { 01207 st->parser = av_parser_init(st->codec->codec_id); 01208 if (!st->parser) { 01209 /* no parser available: just output the raw packets */ 01210 st->need_parsing = AVSTREAM_PARSE_NONE; 01211 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){ 01212 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES; 01213 }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){ 01214 st->parser->flags |= PARSER_FLAG_ONCE; 01215 } 01216 } 01217 } 01218 } 01219 if(s->debug & FF_FDEBUG_TS) 01220 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n", 01221 pkt->stream_index, 01222 pkt->pts, 01223 pkt->dts, 01224 pkt->size, 01225 pkt->duration, 01226 pkt->flags); 01227 01228 return 0; 01229 } 01230 01231 int av_read_frame(AVFormatContext *s, AVPacket *pkt) 01232 { 01233 AVPacketList *pktl; 01234 int eof=0; 01235 const int genpts= s->flags & AVFMT_FLAG_GENPTS; 01236 01237 for(;;){ 01238 pktl = s->packet_buffer; 01239 if (pktl) { 01240 AVPacket *next_pkt= &pktl->pkt; 01241 01242 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){ 01243 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits; 01244 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){ 01245 if( pktl->pkt.stream_index == next_pkt->stream_index 01246 && (0 > av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) 01247 && av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame 01248 next_pkt->pts= pktl->pkt.dts; 01249 } 01250 pktl= pktl->next; 01251 } 01252 pktl = s->packet_buffer; 01253 } 01254 01255 if( next_pkt->pts != AV_NOPTS_VALUE 01256 || next_pkt->dts == AV_NOPTS_VALUE 01257 || !genpts || eof){ 01258 /* read packet from packet buffer, if there is data */ 01259 *pkt = *next_pkt; 01260 s->packet_buffer = pktl->next; 01261 av_free(pktl); 01262 return 0; 01263 } 01264 } 01265 if(genpts){ 01266 int ret= av_read_frame_internal(s, pkt); 01267 if(ret<0){ 01268 if(pktl && ret != AVERROR(EAGAIN)){ 01269 eof=1; 01270 continue; 01271 }else 01272 return ret; 01273 } 01274 01275 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt, 01276 &s->packet_buffer_end)) < 0) 01277 return AVERROR(ENOMEM); 01278 }else{ 01279 assert(!s->packet_buffer); 01280 return av_read_frame_internal(s, pkt); 01281 } 01282 } 01283 } 01284 01285 /* XXX: suppress the packet queue */ 01286 static void flush_packet_queue(AVFormatContext *s) 01287 { 01288 AVPacketList *pktl; 01289 01290 for(;;) { 01291 pktl = s->packet_buffer; 01292 if (!pktl) 01293 break; 01294 s->packet_buffer = pktl->next; 01295 av_free_packet(&pktl->pkt); 01296 av_free(pktl); 01297 } 01298 while(s->raw_packet_buffer){ 01299 pktl = s->raw_packet_buffer; 01300 s->raw_packet_buffer = pktl->next; 01301 av_free_packet(&pktl->pkt); 01302 av_free(pktl); 01303 } 01304 s->packet_buffer_end= 01305 s->raw_packet_buffer_end= NULL; 01306 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE; 01307 } 01308 01309 /*******************************************************/ 01310 /* seek support */ 01311 01312 int av_find_default_stream_index(AVFormatContext *s) 01313 { 01314 int first_audio_index = -1; 01315 int i; 01316 AVStream *st; 01317 01318 if (s->nb_streams <= 0) 01319 return -1; 01320 for(i = 0; i < s->nb_streams; i++) { 01321 st = s->streams[i]; 01322 if 
(st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { 01323 return i; 01324 } 01325 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) 01326 first_audio_index = i; 01327 } 01328 return first_audio_index >= 0 ? first_audio_index : 0; 01329 } 01330 01334 void ff_read_frame_flush(AVFormatContext *s) 01335 { 01336 AVStream *st; 01337 int i, j; 01338 01339 flush_packet_queue(s); 01340 01341 s->cur_st = NULL; 01342 01343 /* for each stream, reset read state */ 01344 for(i = 0; i < s->nb_streams; i++) { 01345 st = s->streams[i]; 01346 01347 if (st->parser) { 01348 av_parser_close(st->parser); 01349 st->parser = NULL; 01350 av_free_packet(&st->cur_pkt); 01351 } 01352 st->last_IP_pts = AV_NOPTS_VALUE; 01353 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */ 01354 st->reference_dts = AV_NOPTS_VALUE; 01355 /* fail safe */ 01356 st->cur_ptr = NULL; 01357 st->cur_len = 0; 01358 01359 st->probe_packets = MAX_PROBE_PACKETS; 01360 01361 for(j=0; j<MAX_REORDER_DELAY+1; j++) 01362 st->pts_buffer[j]= AV_NOPTS_VALUE; 01363 } 01364 } 01365 01366 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){ 01367 int i; 01368 01369 for(i = 0; i < s->nb_streams; i++) { 01370 AVStream *st = s->streams[i]; 01371 01372 st->cur_dts = av_rescale(timestamp, 01373 st->time_base.den * (int64_t)ref_st->time_base.num, 01374 st->time_base.num * (int64_t)ref_st->time_base.den); 01375 } 01376 } 01377 01378 void ff_reduce_index(AVFormatContext *s, int stream_index) 01379 { 01380 AVStream *st= s->streams[stream_index]; 01381 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry); 01382 01383 if((unsigned)st->nb_index_entries >= max_entries){ 01384 int i; 01385 for(i=0; 2*i<st->nb_index_entries; i++) 01386 st->index_entries[i]= st->index_entries[2*i]; 01387 st->nb_index_entries= i; 01388 } 01389 } 01390 01391 int ff_add_index_entry(AVIndexEntry **index_entries, 01392 int *nb_index_entries, 01393 unsigned int *index_entries_allocated_size, 01394 int64_t pos, int64_t timestamp, int size, int distance, int flags) 01395 { 01396 AVIndexEntry *entries, *ie; 01397 int index; 01398 01399 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry)) 01400 return -1; 01401 01402 entries = av_fast_realloc(*index_entries, 01403 index_entries_allocated_size, 01404 (*nb_index_entries + 1) * 01405 sizeof(AVIndexEntry)); 01406 if(!entries) 01407 return -1; 01408 01409 *index_entries= entries; 01410 01411 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY); 01412 01413 if(index<0){ 01414 index= (*nb_index_entries)++; 01415 ie= &entries[index]; 01416 assert(index==0 || ie[-1].timestamp < timestamp); 01417 }else{ 01418 ie= &entries[index]; 01419 if(ie->timestamp != timestamp){ 01420 if(ie->timestamp <= timestamp) 01421 return -1; 01422 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index)); 01423 (*nb_index_entries)++; 01424 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance 01425 distance= ie->min_distance; 01426 } 01427 01428 ie->pos = pos; 01429 ie->timestamp = timestamp; 01430 ie->min_distance= distance; 01431 ie->size= size; 01432 ie->flags = flags; 01433 01434 return index; 01435 } 01436 01437 int av_add_index_entry(AVStream *st, 01438 int64_t pos, int64_t timestamp, int size, int distance, int flags) 01439 { 01440 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries, 01441 &st->index_entries_allocated_size, pos, 01442 
timestamp, size, distance, flags); 01443 } 01444 01445 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries, 01446 int64_t wanted_timestamp, int flags) 01447 { 01448 int a, b, m; 01449 int64_t timestamp; 01450 01451 a = - 1; 01452 b = nb_entries; 01453 01454 //optimize appending index entries at the end 01455 if(b && entries[b-1].timestamp < wanted_timestamp) 01456 a= b-1; 01457 01458 while (b - a > 1) { 01459 m = (a + b) >> 1; 01460 timestamp = entries[m].timestamp; 01461 if(timestamp >= wanted_timestamp) 01462 b = m; 01463 if(timestamp <= wanted_timestamp) 01464 a = m; 01465 } 01466 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b; 01467 01468 if(!(flags & AVSEEK_FLAG_ANY)){ 01469 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){ 01470 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1; 01471 } 01472 } 01473 01474 if(m == nb_entries) 01475 return -1; 01476 return m; 01477 } 01478 01479 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, 01480 int flags) 01481 { 01482 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries, 01483 wanted_timestamp, flags); 01484 } 01485 01486 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){ 01487 AVInputFormat *avif= s->iformat; 01488 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit; 01489 int64_t ts_min, ts_max, ts; 01490 int index; 01491 int64_t ret; 01492 AVStream *st; 01493 01494 if (stream_index < 0) 01495 return -1; 01496 01497 av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts); 01498 01499 ts_max= 01500 ts_min= AV_NOPTS_VALUE; 01501 pos_limit= -1; //gcc falsely says it may be uninitialized 01502 01503 st= s->streams[stream_index]; 01504 if(st->index_entries){ 01505 AVIndexEntry *e; 01506 01507 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp() 01508 index= FFMAX(index, 0); 01509 e= &st->index_entries[index]; 01510 01511 if(e->timestamp <= target_ts || e->pos == e->min_distance){ 01512 pos_min= e->pos; 01513 ts_min= e->timestamp; 01514 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n", 01515 pos_min,ts_min); 01516 }else{ 01517 assert(index==0); 01518 } 01519 01520 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD); 01521 assert(index < st->nb_index_entries); 01522 if(index >= 0){ 01523 e= &st->index_entries[index]; 01524 assert(e->timestamp >= target_ts); 01525 pos_max= e->pos; 01526 ts_max= e->timestamp; 01527 pos_limit= pos_max - e->min_distance; 01528 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n", 01529 pos_max,pos_limit, ts_max); 01530 } 01531 } 01532 01533 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp); 01534 if(pos<0) 01535 return -1; 01536 01537 /* do the seek */ 01538 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0) 01539 return ret; 01540 01541 av_update_cur_dts(s, st, ts); 01542 01543 return 0; 01544 } 01545 01546 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){ 01547 int64_t pos, ts; 01548 int64_t start_pos, filesize; 01549 int no_change; 01550 01551 av_dlog(s, "gen_seek: %d %"PRId64"\n", 
stream_index, target_ts); 01552 01553 if(ts_min == AV_NOPTS_VALUE){ 01554 pos_min = s->data_offset; 01555 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX); 01556 if (ts_min == AV_NOPTS_VALUE) 01557 return -1; 01558 } 01559 01560 if(ts_max == AV_NOPTS_VALUE){ 01561 int step= 1024; 01562 filesize = avio_size(s->pb); 01563 pos_max = filesize - 1; 01564 do{ 01565 pos_max -= step; 01566 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step); 01567 step += step; 01568 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step); 01569 if (ts_max == AV_NOPTS_VALUE) 01570 return -1; 01571 01572 for(;;){ 01573 int64_t tmp_pos= pos_max + 1; 01574 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX); 01575 if(tmp_ts == AV_NOPTS_VALUE) 01576 break; 01577 ts_max= tmp_ts; 01578 pos_max= tmp_pos; 01579 if(tmp_pos >= filesize) 01580 break; 01581 } 01582 pos_limit= pos_max; 01583 } 01584 01585 if(ts_min > ts_max){ 01586 return -1; 01587 }else if(ts_min == ts_max){ 01588 pos_limit= pos_min; 01589 } 01590 01591 no_change=0; 01592 while (pos_min < pos_limit) { 01593 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n", 01594 pos_min, pos_max, ts_min, ts_max); 01595 assert(pos_limit <= pos_max); 01596 01597 if(no_change==0){ 01598 int64_t approximate_keyframe_distance= pos_max - pos_limit; 01599 // interpolate position (better than dichotomy) 01600 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min) 01601 + pos_min - approximate_keyframe_distance; 01602 }else if(no_change==1){ 01603 // bisection, if interpolation failed to change min or max pos last time 01604 pos = (pos_min + pos_limit)>>1; 01605 }else{ 01606 /* linear search if bisection failed, can only happen if there 01607 are very few or no keyframes between min/max */ 01608 pos=pos_min; 01609 } 01610 if(pos <= pos_min) 01611 pos= pos_min + 1; 01612 else if(pos > pos_limit) 01613 pos= pos_limit; 01614 start_pos= pos; 01615 01616 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1 01617 if(pos == pos_max) 01618 no_change++; 01619 else 01620 no_change=0; 01621 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", 01622 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, 01623 pos_limit, start_pos, no_change); 01624 if(ts == AV_NOPTS_VALUE){ 01625 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n"); 01626 return -1; 01627 } 01628 assert(ts != AV_NOPTS_VALUE); 01629 if (target_ts <= ts) { 01630 pos_limit = start_pos - 1; 01631 pos_max = pos; 01632 ts_max = ts; 01633 } 01634 if (target_ts >= ts) { 01635 pos_min = pos; 01636 ts_min = ts; 01637 } 01638 } 01639 01640 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max; 01641 ts = (flags & AVSEEK_FLAG_BACKWARD) ? 
ts_min : ts_max; 01642 pos_min = pos; 01643 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX); 01644 pos_min++; 01645 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX); 01646 av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n", 01647 pos, ts_min, target_ts, ts_max); 01648 *ts_ret= ts; 01649 return pos; 01650 } 01651 01652 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){ 01653 int64_t pos_min, pos_max; 01654 #if 0 01655 AVStream *st; 01656 01657 if (stream_index < 0) 01658 return -1; 01659 01660 st= s->streams[stream_index]; 01661 #endif 01662 01663 pos_min = s->data_offset; 01664 pos_max = avio_size(s->pb) - 1; 01665 01666 if (pos < pos_min) pos= pos_min; 01667 else if(pos > pos_max) pos= pos_max; 01668 01669 avio_seek(s->pb, pos, SEEK_SET); 01670 01671 #if 0 01672 av_update_cur_dts(s, st, ts); 01673 #endif 01674 return 0; 01675 } 01676 01677 static int av_seek_frame_generic(AVFormatContext *s, 01678 int stream_index, int64_t timestamp, int flags) 01679 { 01680 int index; 01681 int64_t ret; 01682 AVStream *st; 01683 AVIndexEntry *ie; 01684 01685 st = s->streams[stream_index]; 01686 01687 index = av_index_search_timestamp(st, timestamp, flags); 01688 01689 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp) 01690 return -1; 01691 01692 if(index < 0 || index==st->nb_index_entries-1){ 01693 int i; 01694 AVPacket pkt; 01695 01696 if(st->nb_index_entries){ 01697 assert(st->index_entries); 01698 ie= &st->index_entries[st->nb_index_entries-1]; 01699 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0) 01700 return ret; 01701 av_update_cur_dts(s, st, ie->timestamp); 01702 }else{ 01703 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0) 01704 return ret; 01705 } 01706 for(i=0;; i++) { 01707 int ret; 01708 do{ 01709 ret = av_read_frame(s, &pkt); 01710 }while(ret == AVERROR(EAGAIN)); 01711 if(ret<0) 01712 break; 01713 av_free_packet(&pkt); 01714 if(stream_index == pkt.stream_index){ 01715 if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp) 01716 break; 01717 } 01718 } 01719 index = av_index_search_timestamp(st, timestamp, flags); 01720 } 01721 if (index < 0) 01722 return -1; 01723 01724 ff_read_frame_flush(s); 01725 if (s->iformat->read_seek){ 01726 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0) 01727 return 0; 01728 } 01729 ie = &st->index_entries[index]; 01730 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0) 01731 return ret; 01732 av_update_cur_dts(s, st, ie->timestamp); 01733 01734 return 0; 01735 } 01736 01737 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) 01738 { 01739 int ret; 01740 AVStream *st; 01741 01742 ff_read_frame_flush(s); 01743 01744 if(flags & AVSEEK_FLAG_BYTE) 01745 return av_seek_frame_byte(s, stream_index, timestamp, flags); 01746 01747 if(stream_index < 0){ 01748 stream_index= av_find_default_stream_index(s); 01749 if(stream_index < 0) 01750 return -1; 01751 01752 st= s->streams[stream_index]; 01753 /* timestamp for default must be expressed in AV_TIME_BASE units */ 01754 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num); 01755 } 01756 01757 /* first, we try the format specific seek */ 01758 if (s->iformat->read_seek) 01759 ret = s->iformat->read_seek(s, stream_index, timestamp, flags); 01760 else 01761 ret = -1; 01762 if (ret >= 0) { 01763 return 0; 01764 } 01765 01766 if(s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) 01767 
return av_seek_frame_binary(s, stream_index, timestamp, flags); 01768 else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) 01769 return av_seek_frame_generic(s, stream_index, timestamp, flags); 01770 else 01771 return -1; 01772 } 01773 01774 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags) 01775 { 01776 if(min_ts > ts || max_ts < ts) 01777 return -1; 01778 01779 ff_read_frame_flush(s); 01780 01781 if (s->iformat->read_seek2) 01782 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags); 01783 01784 if(s->iformat->read_timestamp){ 01785 //try to seek via read_timestamp() 01786 } 01787 01788 //Fallback to old API if new is not implemented but old is 01789 //Note the old has somewat different sematics 01790 if(s->iformat->read_seek || 1) 01791 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0)); 01792 01793 // try some generic seek like av_seek_frame_generic() but with new ts semantics 01794 } 01795 01796 /*******************************************************/ 01797 01803 static int av_has_duration(AVFormatContext *ic) 01804 { 01805 int i; 01806 AVStream *st; 01807 01808 for(i = 0;i < ic->nb_streams; i++) { 01809 st = ic->streams[i]; 01810 if (st->duration != AV_NOPTS_VALUE) 01811 return 1; 01812 } 01813 return 0; 01814 } 01815 01821 static void av_update_stream_timings(AVFormatContext *ic) 01822 { 01823 int64_t start_time, start_time1, end_time, end_time1; 01824 int64_t duration, duration1; 01825 int i; 01826 AVStream *st; 01827 01828 start_time = INT64_MAX; 01829 end_time = INT64_MIN; 01830 duration = INT64_MIN; 01831 for(i = 0;i < ic->nb_streams; i++) { 01832 st = ic->streams[i]; 01833 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) { 01834 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q); 01835 if (start_time1 < start_time) 01836 start_time = start_time1; 01837 if (st->duration != AV_NOPTS_VALUE) { 01838 end_time1 = start_time1 01839 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q); 01840 if (end_time1 > end_time) 01841 end_time = end_time1; 01842 } 01843 } 01844 if (st->duration != AV_NOPTS_VALUE) { 01845 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q); 01846 if (duration1 > duration) 01847 duration = duration1; 01848 } 01849 } 01850 if (start_time != INT64_MAX) { 01851 ic->start_time = start_time; 01852 if (end_time != INT64_MIN) { 01853 if (end_time - start_time > duration) 01854 duration = end_time - start_time; 01855 } 01856 } 01857 if (duration != INT64_MIN) { 01858 ic->duration = duration; 01859 if (ic->file_size > 0) { 01860 /* compute the bitrate */ 01861 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE / 01862 (double)ic->duration; 01863 } 01864 } 01865 } 01866 01867 static void fill_all_stream_timings(AVFormatContext *ic) 01868 { 01869 int i; 01870 AVStream *st; 01871 01872 av_update_stream_timings(ic); 01873 for(i = 0;i < ic->nb_streams; i++) { 01874 st = ic->streams[i]; 01875 if (st->start_time == AV_NOPTS_VALUE) { 01876 if(ic->start_time != AV_NOPTS_VALUE) 01877 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base); 01878 if(ic->duration != AV_NOPTS_VALUE) 01879 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base); 01880 } 01881 } 01882 } 01883 01884 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic) 01885 { 01886 int64_t filesize, duration; 01887 int bit_rate, i; 01888 AVStream *st; 01889 
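    /* Rough sketch of the estimate made below: given a total bit rate and
     * the file size in bytes, the duration expressed in a stream's time base
     * is approximately
     *
     *     duration ~ (filesize * 8 * time_base.den) / (bit_rate * time_base.num)
     *
     * which is what the av_rescale() call computes for each stream that does
     * not yet have a duration. */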
01890 /* if bit_rate is already set, we believe it */ 01891 if (ic->bit_rate <= 0) { 01892 bit_rate = 0; 01893 for(i=0;i<ic->nb_streams;i++) { 01894 st = ic->streams[i]; 01895 if (st->codec->bit_rate > 0) 01896 bit_rate += st->codec->bit_rate; 01897 } 01898 ic->bit_rate = bit_rate; 01899 } 01900 01901 /* if duration is already set, we believe it */ 01902 if (ic->duration == AV_NOPTS_VALUE && 01903 ic->bit_rate != 0 && 01904 ic->file_size != 0) { 01905 filesize = ic->file_size; 01906 if (filesize > 0) { 01907 for(i = 0; i < ic->nb_streams; i++) { 01908 st = ic->streams[i]; 01909 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num); 01910 if (st->duration == AV_NOPTS_VALUE) 01911 st->duration = duration; 01912 } 01913 } 01914 } 01915 } 01916 01917 #define DURATION_MAX_READ_SIZE 250000 01918 #define DURATION_MAX_RETRY 3 01919 01920 /* only usable for MPEG-PS streams */ 01921 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset) 01922 { 01923 AVPacket pkt1, *pkt = &pkt1; 01924 AVStream *st; 01925 int read_size, i, ret; 01926 int64_t end_time; 01927 int64_t filesize, offset, duration; 01928 int retry=0; 01929 01930 ic->cur_st = NULL; 01931 01932 /* flush packet queue */ 01933 flush_packet_queue(ic); 01934 01935 for (i=0; i<ic->nb_streams; i++) { 01936 st = ic->streams[i]; 01937 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE) 01938 av_log(st->codec, AV_LOG_WARNING, "start time is not set in av_estimate_timings_from_pts\n"); 01939 01940 if (st->parser) { 01941 av_parser_close(st->parser); 01942 st->parser= NULL; 01943 av_free_packet(&st->cur_pkt); 01944 } 01945 } 01946 01947 /* estimate the end time (duration) */ 01948 /* XXX: may need to support wrapping */ 01949 filesize = ic->file_size; 01950 end_time = AV_NOPTS_VALUE; 01951 do{ 01952 offset = filesize - (DURATION_MAX_READ_SIZE<<retry); 01953 if (offset < 0) 01954 offset = 0; 01955 01956 avio_seek(ic->pb, offset, SEEK_SET); 01957 read_size = 0; 01958 for(;;) { 01959 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0))) 01960 break; 01961 01962 do{ 01963 ret = av_read_packet(ic, pkt); 01964 }while(ret == AVERROR(EAGAIN)); 01965 if (ret != 0) 01966 break; 01967 read_size += pkt->size; 01968 st = ic->streams[pkt->stream_index]; 01969 if (pkt->pts != AV_NOPTS_VALUE && 01970 (st->start_time != AV_NOPTS_VALUE || 01971 st->first_dts != AV_NOPTS_VALUE)) { 01972 duration = end_time = pkt->pts; 01973 if (st->start_time != AV_NOPTS_VALUE) duration -= st->start_time; 01974 else duration -= st->first_dts; 01975 if (duration < 0) 01976 duration += 1LL<<st->pts_wrap_bits; 01977 if (duration > 0) { 01978 if (st->duration == AV_NOPTS_VALUE || 01979 st->duration < duration) 01980 st->duration = duration; 01981 } 01982 } 01983 av_free_packet(pkt); 01984 } 01985 }while( end_time==AV_NOPTS_VALUE 01986 && filesize > (DURATION_MAX_READ_SIZE<<retry) 01987 && ++retry <= DURATION_MAX_RETRY); 01988 01989 fill_all_stream_timings(ic); 01990 01991 avio_seek(ic->pb, old_offset, SEEK_SET); 01992 for (i=0; i<ic->nb_streams; i++) { 01993 st= ic->streams[i]; 01994 st->cur_dts= st->first_dts; 01995 st->last_IP_pts = AV_NOPTS_VALUE; 01996 st->reference_dts = AV_NOPTS_VALUE; 01997 } 01998 } 01999 02000 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset) 02001 { 02002 int64_t file_size; 02003 02004 /* get the file size, if possible */ 02005 if (ic->iformat->flags & AVFMT_NOFILE) { 02006 file_size = 0; 02007 } else { 02008 file_size = avio_size(ic->pb); 02009 if 
(file_size < 0) 02010 file_size = 0; 02011 } 02012 ic->file_size = file_size; 02013 02014 if ((!strcmp(ic->iformat->name, "mpeg") || 02015 !strcmp(ic->iformat->name, "mpegts")) && 02016 file_size && ic->pb->seekable) { 02017 /* get accurate estimate from the PTSes */ 02018 av_estimate_timings_from_pts(ic, old_offset); 02019 } else if (av_has_duration(ic)) { 02020 /* at least one component has timings - we use them for all 02021 the components */ 02022 fill_all_stream_timings(ic); 02023 } else { 02024 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n"); 02025 /* less precise: use bitrate info */ 02026 av_estimate_timings_from_bit_rate(ic); 02027 } 02028 av_update_stream_timings(ic); 02029 02030 { 02031 int i; 02032 AVStream av_unused *st; 02033 for(i = 0;i < ic->nb_streams; i++) { 02034 st = ic->streams[i]; 02035 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i, 02036 (double) st->start_time / AV_TIME_BASE, 02037 (double) st->duration / AV_TIME_BASE); 02038 } 02039 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n", 02040 (double) ic->start_time / AV_TIME_BASE, 02041 (double) ic->duration / AV_TIME_BASE, 02042 ic->bit_rate / 1000); 02043 } 02044 } 02045 02046 static int has_codec_parameters(AVCodecContext *enc) 02047 { 02048 int val; 02049 switch(enc->codec_type) { 02050 case AVMEDIA_TYPE_AUDIO: 02051 val = enc->sample_rate && enc->channels && enc->sample_fmt != AV_SAMPLE_FMT_NONE; 02052 if(!enc->frame_size && 02053 (enc->codec_id == CODEC_ID_VORBIS || 02054 enc->codec_id == CODEC_ID_AAC || 02055 enc->codec_id == CODEC_ID_MP1 || 02056 enc->codec_id == CODEC_ID_MP2 || 02057 enc->codec_id == CODEC_ID_MP3 || 02058 enc->codec_id == CODEC_ID_SPEEX)) 02059 return 0; 02060 break; 02061 case AVMEDIA_TYPE_VIDEO: 02062 val = enc->width && enc->pix_fmt != PIX_FMT_NONE; 02063 break; 02064 default: 02065 val = 1; 02066 break; 02067 } 02068 return enc->codec_id != CODEC_ID_NONE && val != 0; 02069 } 02070 02071 static int has_decode_delay_been_guessed(AVStream *st) 02072 { 02073 return st->codec->codec_id != CODEC_ID_H264 || 02074 st->codec_info_nb_frames >= 6 + st->codec->has_b_frames; 02075 } 02076 02077 static int try_decode_frame(AVStream *st, AVPacket *avpkt) 02078 { 02079 int16_t *samples; 02080 AVCodec *codec; 02081 int got_picture, data_size, ret=0; 02082 AVFrame picture; 02083 02084 if(!st->codec->codec){ 02085 codec = avcodec_find_decoder(st->codec->codec_id); 02086 if (!codec) 02087 return -1; 02088 ret = avcodec_open(st->codec, codec); 02089 if (ret < 0) 02090 return ret; 02091 } 02092 02093 if(!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st)){ 02094 switch(st->codec->codec_type) { 02095 case AVMEDIA_TYPE_VIDEO: 02096 avcodec_get_frame_defaults(&picture); 02097 ret = avcodec_decode_video2(st->codec, &picture, 02098 &got_picture, avpkt); 02099 break; 02100 case AVMEDIA_TYPE_AUDIO: 02101 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE); 02102 samples = av_malloc(data_size); 02103 if (!samples) 02104 goto fail; 02105 ret = avcodec_decode_audio3(st->codec, samples, 02106 &data_size, avpkt); 02107 av_free(samples); 02108 break; 02109 default: 02110 break; 02111 } 02112 } 02113 fail: 02114 return ret; 02115 } 02116 02117 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id) 02118 { 02119 while (tags->id != CODEC_ID_NONE) { 02120 if (tags->id == id) 02121 return tags->tag; 02122 tags++; 02123 } 02124 return 0; 02125 } 02126 02127 enum CodecID ff_codec_get_id(const AVCodecTag *tags, 
unsigned int tag) 02128 { 02129 int i; 02130 for(i=0; tags[i].id != CODEC_ID_NONE;i++) { 02131 if(tag == tags[i].tag) 02132 return tags[i].id; 02133 } 02134 for(i=0; tags[i].id != CODEC_ID_NONE; i++) { 02135 if (ff_toupper4(tag) == ff_toupper4(tags[i].tag)) 02136 return tags[i].id; 02137 } 02138 return CODEC_ID_NONE; 02139 } 02140 02141 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id) 02142 { 02143 int i; 02144 for(i=0; tags && tags[i]; i++){ 02145 int tag= ff_codec_get_tag(tags[i], id); 02146 if(tag) return tag; 02147 } 02148 return 0; 02149 } 02150 02151 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag) 02152 { 02153 int i; 02154 for(i=0; tags && tags[i]; i++){ 02155 enum CodecID id= ff_codec_get_id(tags[i], tag); 02156 if(id!=CODEC_ID_NONE) return id; 02157 } 02158 return CODEC_ID_NONE; 02159 } 02160 02161 static void compute_chapters_end(AVFormatContext *s) 02162 { 02163 unsigned int i, j; 02164 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time); 02165 02166 for (i = 0; i < s->nb_chapters; i++) 02167 if (s->chapters[i]->end == AV_NOPTS_VALUE) { 02168 AVChapter *ch = s->chapters[i]; 02169 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base) 02170 : INT64_MAX; 02171 02172 for (j = 0; j < s->nb_chapters; j++) { 02173 AVChapter *ch1 = s->chapters[j]; 02174 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base); 02175 if (j != i && next_start > ch->start && next_start < end) 02176 end = next_start; 02177 } 02178 ch->end = (end == INT64_MAX) ? ch->start : end; 02179 } 02180 } 02181 02182 static int get_std_framerate(int i){ 02183 if(i<60*12) return i*1001; 02184 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12; 02185 } 02186 02187 /* 02188 * Is the time base unreliable. 02189 * This is a heuristic to balance between quick acceptance of the values in 02190 * the headers vs. some extra checks. 02191 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps. 02192 * MPEG-2 commonly misuses field repeat flags to store different framerates. 02193 * And there are "variable" fps files this needs to detect as well. 
02194 */ 02195 static int tb_unreliable(AVCodecContext *c){ 02196 if( c->time_base.den >= 101L*c->time_base.num 02197 || c->time_base.den < 5L*c->time_base.num 02198 /* || c->codec_tag == AV_RL32("DIVX") 02199 || c->codec_tag == AV_RL32("XVID")*/ 02200 || c->codec_id == CODEC_ID_MPEG2VIDEO 02201 || c->codec_id == CODEC_ID_H264 02202 ) 02203 return 1; 02204 return 0; 02205 } 02206 02207 int av_find_stream_info(AVFormatContext *ic) 02208 { 02209 int i, count, ret, read_size, j; 02210 AVStream *st; 02211 AVPacket pkt1, *pkt; 02212 int64_t old_offset = avio_tell(ic->pb); 02213 02214 for(i=0;i<ic->nb_streams;i++) { 02215 AVCodec *codec; 02216 st = ic->streams[i]; 02217 if (st->codec->codec_id == CODEC_ID_AAC) { 02218 st->codec->sample_rate = 0; 02219 st->codec->frame_size = 0; 02220 st->codec->channels = 0; 02221 } 02222 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO || 02223 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) { 02224 /* if(!st->time_base.num) 02225 st->time_base= */ 02226 if(!st->codec->time_base.num) 02227 st->codec->time_base= st->time_base; 02228 } 02229 //only for the split stuff 02230 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) { 02231 st->parser = av_parser_init(st->codec->codec_id); 02232 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){ 02233 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES; 02234 } 02235 } 02236 assert(!st->codec->codec); 02237 codec = avcodec_find_decoder(st->codec->codec_id); 02238 02239 /* Force decoding of at least one frame of codec data 02240 * this makes sure the codec initializes the channel configuration 02241 * and does not trust the values from the container. 02242 */ 02243 if (codec && codec->capabilities & CODEC_CAP_CHANNEL_CONF) 02244 st->codec->channels = 0; 02245 02246 /* Ensure that subtitle_header is properly set. 
*/ 02247 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE 02248 && codec && !st->codec->codec) 02249 avcodec_open(st->codec, codec); 02250 02251 //try to just open decoders, in case this is enough to get parameters 02252 if(!has_codec_parameters(st->codec)){ 02253 if (codec && !st->codec->codec) 02254 avcodec_open(st->codec, codec); 02255 } 02256 } 02257 02258 for (i=0; i<ic->nb_streams; i++) { 02259 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE; 02260 } 02261 02262 count = 0; 02263 read_size = 0; 02264 for(;;) { 02265 if(url_interrupt_cb()){ 02266 ret= AVERROR_EXIT; 02267 av_log(ic, AV_LOG_DEBUG, "interrupted\n"); 02268 break; 02269 } 02270 02271 /* check if one codec still needs to be handled */ 02272 for(i=0;i<ic->nb_streams;i++) { 02273 int fps_analyze_framecount = 20; 02274 02275 st = ic->streams[i]; 02276 if (!has_codec_parameters(st->codec)) 02277 break; 02278 /* if the timebase is coarse (like the usual millisecond precision 02279 of mkv), we need to analyze more frames to reliably arrive at 02280 the correct fps */ 02281 if (av_q2d(st->time_base) > 0.0005) 02282 fps_analyze_framecount *= 2; 02283 if (ic->fps_probe_size >= 0) 02284 fps_analyze_framecount = ic->fps_probe_size; 02285 /* variable fps and no guess at the real fps */ 02286 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num) 02287 && st->info->duration_count < fps_analyze_framecount 02288 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO) 02289 break; 02290 if(st->parser && st->parser->parser->split && !st->codec->extradata) 02291 break; 02292 if(st->first_dts == AV_NOPTS_VALUE) 02293 break; 02294 } 02295 if (i == ic->nb_streams) { 02296 /* NOTE: if the format has no header, then we need to read 02297 some packets to get most of the streams, so we cannot 02298 stop here */ 02299 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) { 02300 /* if we found the info for all the codecs, we can stop */ 02301 ret = count; 02302 av_log(ic, AV_LOG_DEBUG, "All info found\n"); 02303 break; 02304 } 02305 } 02306 /* we did not get all the codec info, but we read too much data */ 02307 if (read_size >= ic->probesize) { 02308 ret = count; 02309 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize); 02310 break; 02311 } 02312 02313 /* NOTE: a new stream can be added there if no header in file 02314 (AVFMTCTX_NOHEADER) */ 02315 ret = av_read_frame_internal(ic, &pkt1); 02316 if (ret < 0 && ret != AVERROR(EAGAIN)) { 02317 /* EOF or error */ 02318 ret = -1; /* we could not have all the codec parameters before EOF */ 02319 for(i=0;i<ic->nb_streams;i++) { 02320 st = ic->streams[i]; 02321 if (!has_codec_parameters(st->codec)){ 02322 char buf[256]; 02323 avcodec_string(buf, sizeof(buf), st->codec, 0); 02324 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf); 02325 } else { 02326 ret = 0; 02327 } 02328 } 02329 break; 02330 } 02331 02332 if (ret == AVERROR(EAGAIN)) 02333 continue; 02334 02335 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end); 02336 if ((ret = av_dup_packet(pkt)) < 0) 02337 goto find_stream_info_err; 02338 02339 read_size += pkt->size; 02340 02341 st = ic->streams[pkt->stream_index]; 02342 if (st->codec_info_nb_frames>1) { 02343 if (st->time_base.den > 0 && av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) { 02344 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n"); 02345 break; 02346 } 02347 st->info->codec_info_duration += pkt->duration; 02348 } 02349 { 02350 int64_t last = 
st->info->last_dts; 02351 int64_t duration= pkt->dts - last; 02352 02353 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){ 02354 double dur= duration * av_q2d(st->time_base); 02355 02356 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO) 02357 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur); 02358 if (st->info->duration_count < 2) 02359 memset(st->info->duration_error, 0, sizeof(st->info->duration_error)); 02360 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error); i++) { 02361 int framerate= get_std_framerate(i); 02362 int ticks= lrintf(dur*framerate/(1001*12)); 02363 double error= dur - ticks*1001*12/(double)framerate; 02364 st->info->duration_error[i] += error*error; 02365 } 02366 st->info->duration_count++; 02367 // ignore the first 4 values, they might have some random jitter 02368 if (st->info->duration_count > 3) 02369 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration); 02370 } 02371 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1) 02372 st->info->last_dts = pkt->dts; 02373 } 02374 if(st->parser && st->parser->parser->split && !st->codec->extradata){ 02375 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size); 02376 if(i){ 02377 st->codec->extradata_size= i; 02378 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); 02379 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size); 02380 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE); 02381 } 02382 } 02383 02384 /* if still no information, we try to open the codec and to 02385 decompress the frame. We try to avoid that in most cases as 02386 it takes longer and uses more memory. For MPEG-4, we need to 02387 decompress for QuickTime. */ 02388 if (!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st)) 02389 try_decode_frame(st, pkt); 02390 02391 st->codec_info_nb_frames++; 02392 count++; 02393 } 02394 02395 // close codecs which were opened in try_decode_frame() 02396 for(i=0;i<ic->nb_streams;i++) { 02397 st = ic->streams[i]; 02398 if(st->codec->codec) 02399 avcodec_close(st->codec); 02400 } 02401 for(i=0;i<ic->nb_streams;i++) { 02402 st = ic->streams[i]; 02403 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration) 02404 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den, 02405 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den, 02406 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000); 02407 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { 02408 // the check for tb_unreliable() is not completely correct, since this is not about handling 02409 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g. 02410 // ipmovie.c produces. 02411 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > 1 && !st->r_frame_rate.num) 02412 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX); 02413 if (st->info->duration_count && !st->r_frame_rate.num 02414 && tb_unreliable(st->codec) /*&& 02415 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ... 
02416 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){ 02417 int num = 0; 02418 double best_error= 2*av_q2d(st->time_base); 02419 best_error = best_error*best_error*st->info->duration_count*1000*12*30; 02420 02421 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error); j++) { 02422 double error = st->info->duration_error[j] * get_std_framerate(j); 02423 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO) 02424 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error); 02425 if(error < best_error){ 02426 best_error= error; 02427 num = get_std_framerate(j); 02428 } 02429 } 02430 // do not increase frame rate by more than 1 % in order to match a standard rate. 02431 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate))) 02432 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX); 02433 } 02434 02435 if (!st->r_frame_rate.num){ 02436 if( st->codec->time_base.den * (int64_t)st->time_base.num 02437 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){ 02438 st->r_frame_rate.num = st->codec->time_base.den; 02439 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame; 02440 }else{ 02441 st->r_frame_rate.num = st->time_base.den; 02442 st->r_frame_rate.den = st->time_base.num; 02443 } 02444 } 02445 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { 02446 if(!st->codec->bits_per_coded_sample) 02447 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id); 02448 // set stream disposition based on audio service type 02449 switch (st->codec->audio_service_type) { 02450 case AV_AUDIO_SERVICE_TYPE_EFFECTS: 02451 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break; 02452 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED: 02453 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break; 02454 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED: 02455 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break; 02456 case AV_AUDIO_SERVICE_TYPE_COMMENTARY: 02457 st->disposition = AV_DISPOSITION_COMMENT; break; 02458 case AV_AUDIO_SERVICE_TYPE_KARAOKE: 02459 st->disposition = AV_DISPOSITION_KARAOKE; break; 02460 } 02461 } 02462 } 02463 02464 av_estimate_timings(ic, old_offset); 02465 02466 compute_chapters_end(ic); 02467 02468 #if 0 02469 /* correct DTS for B-frame streams with no timestamps */ 02470 for(i=0;i<ic->nb_streams;i++) { 02471 st = ic->streams[i]; 02472 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { 02473 if(b-frames){ 02474 ppktl = &ic->packet_buffer; 02475 while(ppkt1){ 02476 if(ppkt1->stream_index != i) 02477 continue; 02478 if(ppkt1->pkt->dts < 0) 02479 break; 02480 if(ppkt1->pkt->pts != AV_NOPTS_VALUE) 02481 break; 02482 ppkt1->pkt->dts -= delta; 02483 ppkt1= ppkt1->next; 02484 } 02485 if(ppkt1) 02486 continue; 02487 st->cur_dts -= delta; 02488 } 02489 } 02490 } 02491 #endif 02492 02493 find_stream_info_err: 02494 for (i=0; i < ic->nb_streams; i++) 02495 av_freep(&ic->streams[i]->info); 02496 return ret; 02497 } 02498 02499 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s) 02500 { 02501 int i, j; 02502 02503 for (i = 0; i < ic->nb_programs; i++) 02504 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++) 02505 if (ic->programs[i]->stream_index[j] == s) 02506 return ic->programs[i]; 02507 return NULL; 02508 } 02509 02510 int av_find_best_stream(AVFormatContext *ic, 02511 enum AVMediaType type, 02512 int wanted_stream_nb, 02513 int related_stream, 02514 AVCodec **decoder_ret, 02515 
int flags) 02516 { 02517 int i, nb_streams = ic->nb_streams; 02518 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1; 02519 unsigned *program = NULL; 02520 AVCodec *decoder = NULL, *best_decoder = NULL; 02521 02522 if (related_stream >= 0 && wanted_stream_nb < 0) { 02523 AVProgram *p = find_program_from_stream(ic, related_stream); 02524 if (p) { 02525 program = p->stream_index; 02526 nb_streams = p->nb_stream_indexes; 02527 } 02528 } 02529 for (i = 0; i < nb_streams; i++) { 02530 int real_stream_index = program ? program[i] : i; 02531 AVStream *st = ic->streams[real_stream_index]; 02532 AVCodecContext *avctx = st->codec; 02533 if (avctx->codec_type != type) 02534 continue; 02535 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb) 02536 continue; 02537 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED)) 02538 continue; 02539 if (decoder_ret) { 02540 decoder = avcodec_find_decoder(st->codec->codec_id); 02541 if (!decoder) { 02542 if (ret < 0) 02543 ret = AVERROR_DECODER_NOT_FOUND; 02544 continue; 02545 } 02546 } 02547 if (best_count >= st->codec_info_nb_frames) 02548 continue; 02549 best_count = st->codec_info_nb_frames; 02550 ret = real_stream_index; 02551 best_decoder = decoder; 02552 if (program && i == nb_streams - 1 && ret < 0) { 02553 program = NULL; 02554 nb_streams = ic->nb_streams; 02555 i = 0; /* no related stream found, try again with everything */ 02556 } 02557 } 02558 if (decoder_ret) 02559 *decoder_ret = best_decoder; 02560 return ret; 02561 } 02562 02563 /*******************************************************/ 02564 02565 int av_read_play(AVFormatContext *s) 02566 { 02567 if (s->iformat->read_play) 02568 return s->iformat->read_play(s); 02569 if (s->pb) 02570 return avio_pause(s->pb, 0); 02571 return AVERROR(ENOSYS); 02572 } 02573 02574 int av_read_pause(AVFormatContext *s) 02575 { 02576 if (s->iformat->read_pause) 02577 return s->iformat->read_pause(s); 02578 if (s->pb) 02579 return avio_pause(s->pb, 1); 02580 return AVERROR(ENOSYS); 02581 } 02582 02583 void av_close_input_stream(AVFormatContext *s) 02584 { 02585 flush_packet_queue(s); 02586 if (s->iformat->read_close) 02587 s->iformat->read_close(s); 02588 avformat_free_context(s); 02589 } 02590 02591 void avformat_free_context(AVFormatContext *s) 02592 { 02593 int i; 02594 AVStream *st; 02595 02596 av_opt_free(s); 02597 if (s->iformat && s->iformat->priv_class && s->priv_data) 02598 av_opt_free(s->priv_data); 02599 02600 for(i=0;i<s->nb_streams;i++) { 02601 /* free all data in a stream component */ 02602 st = s->streams[i]; 02603 if (st->parser) { 02604 av_parser_close(st->parser); 02605 av_free_packet(&st->cur_pkt); 02606 } 02607 av_dict_free(&st->metadata); 02608 av_free(st->index_entries); 02609 av_free(st->codec->extradata); 02610 av_free(st->codec->subtitle_header); 02611 av_free(st->codec); 02612 av_free(st->priv_data); 02613 av_free(st->info); 02614 av_free(st); 02615 } 02616 for(i=s->nb_programs-1; i>=0; i--) { 02617 av_dict_free(&s->programs[i]->metadata); 02618 av_freep(&s->programs[i]->stream_index); 02619 av_freep(&s->programs[i]); 02620 } 02621 av_freep(&s->programs); 02622 av_freep(&s->priv_data); 02623 while(s->nb_chapters--) { 02624 av_dict_free(&s->chapters[s->nb_chapters]->metadata); 02625 av_free(s->chapters[s->nb_chapters]); 02626 } 02627 av_freep(&s->chapters); 02628 av_dict_free(&s->metadata); 02629 av_freep(&s->streams); 02630 av_free(s); 02631 } 02632 02633 void av_close_input_file(AVFormatContext *s) 02634 { 02635 AVIOContext *pb = 
(s->iformat->flags & AVFMT_NOFILE) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ? 02636 NULL : s->pb; 02637 av_close_input_stream(s); 02638 if (pb) 02639 avio_close(pb); 02640 } 02641 02642 AVStream *av_new_stream(AVFormatContext *s, int id) 02643 { 02644 AVStream *st; 02645 int i; 02646 AVStream **streams; 02647 02648 if (s->nb_streams >= INT_MAX/sizeof(*streams)) 02649 return NULL; 02650 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams)); 02651 if (!streams) 02652 return NULL; 02653 s->streams = streams; 02654 02655 st = av_mallocz(sizeof(AVStream)); 02656 if (!st) 02657 return NULL; 02658 if (!(st->info = av_mallocz(sizeof(*st->info)))) { 02659 av_free(st); 02660 return NULL; 02661 } 02662 02663 st->codec= avcodec_alloc_context(); 02664 if (s->iformat) { 02665 /* no default bitrate if decoding */ 02666 st->codec->bit_rate = 0; 02667 } 02668 st->index = s->nb_streams; 02669 st->id = id; 02670 st->start_time = AV_NOPTS_VALUE; 02671 st->duration = AV_NOPTS_VALUE; 02672 /* we set the current DTS to 0 so that formats without any timestamps 02673 but durations get some timestamps, formats with some unknown 02674 timestamps have their first few packets buffered and the 02675 timestamps corrected before they are returned to the user */ 02676 st->cur_dts = 0; 02677 st->first_dts = AV_NOPTS_VALUE; 02678 st->probe_packets = MAX_PROBE_PACKETS; 02679 02680 /* default pts setting is MPEG-like */ 02681 av_set_pts_info(st, 33, 1, 90000); 02682 st->last_IP_pts = AV_NOPTS_VALUE; 02683 for(i=0; i<MAX_REORDER_DELAY+1; i++) 02684 st->pts_buffer[i]= AV_NOPTS_VALUE; 02685 st->reference_dts = AV_NOPTS_VALUE; 02686 02687 st->sample_aspect_ratio = (AVRational){0,1}; 02688 02689 s->streams[s->nb_streams++] = st; 02690 return st; 02691 } 02692 02693 AVProgram *av_new_program(AVFormatContext *ac, int id) 02694 { 02695 AVProgram *program=NULL; 02696 int i; 02697 02698 av_dlog(ac, "new_program: id=0x%04x\n", id); 02699 02700 for(i=0; i<ac->nb_programs; i++) 02701 if(ac->programs[i]->id == id) 02702 program = ac->programs[i]; 02703 02704 if(!program){ 02705 program = av_mallocz(sizeof(AVProgram)); 02706 if (!program) 02707 return NULL; 02708 dynarray_add(&ac->programs, &ac->nb_programs, program); 02709 program->discard = AVDISCARD_NONE; 02710 } 02711 program->id = id; 02712 02713 return program; 02714 } 02715 02716 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title) 02717 { 02718 AVChapter *chapter = NULL; 02719 int i; 02720 02721 for(i=0; i<s->nb_chapters; i++) 02722 if(s->chapters[i]->id == id) 02723 chapter = s->chapters[i]; 02724 02725 if(!chapter){ 02726 chapter= av_mallocz(sizeof(AVChapter)); 02727 if(!chapter) 02728 return NULL; 02729 dynarray_add(&s->chapters, &s->nb_chapters, chapter); 02730 } 02731 av_dict_set(&chapter->metadata, "title", title, 0); 02732 chapter->id = id; 02733 chapter->time_base= time_base; 02734 chapter->start = start; 02735 chapter->end = end; 02736 02737 return chapter; 02738 } 02739 02740 /************************************************************/ 02741 /* output media file */ 02742 02743 #if FF_API_FORMAT_PARAMETERS 02744 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap) 02745 { 02746 int ret; 02747 02748 if (s->oformat->priv_data_size > 0) { 02749 s->priv_data = av_mallocz(s->oformat->priv_data_size); 02750 if (!s->priv_data) 02751 return AVERROR(ENOMEM); 02752 if (s->oformat->priv_class) { 02753 *(const AVClass**)s->priv_data= s->oformat->priv_class; 02754 
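/* Editor's note: an illustrative sketch of the av_new_stream() setup path
 * defined above, for a muxing context; it is not part of utils.c. The
 * container name "mpegts" and all codec parameter values are arbitrary
 * assumptions, not values taken from the libav sources. */
#if 0
static AVFormatContext *make_output_context(void)
{
    AVFormatContext *oc = avformat_alloc_context();
    AVStream *st;

    if (!oc)
        return NULL;
    oc->oformat = av_guess_format("mpegts", NULL, NULL);
    if (!oc->oformat)
        goto fail;
    st = av_new_stream(oc, 0);   /* allocates the AVStream and its AVCodecContext */
    if (!st)
        goto fail;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = CODEC_ID_MPEG2VIDEO;
    st->codec->width      = 720;
    st->codec->height     = 576;
    st->codec->time_base  = (AVRational){1, 25};
    return oc;
fail:
    avformat_free_context(oc);
    return NULL;
}
#endif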
av_opt_set_defaults(s->priv_data); 02755 } 02756 } else 02757 s->priv_data = NULL; 02758 02759 if (s->oformat->set_parameters) { 02760 ret = s->oformat->set_parameters(s, ap); 02761 if (ret < 0) 02762 return ret; 02763 } 02764 return 0; 02765 } 02766 #endif 02767 02768 static int validate_codec_tag(AVFormatContext *s, AVStream *st) 02769 { 02770 const AVCodecTag *avctag; 02771 int n; 02772 enum CodecID id = CODEC_ID_NONE; 02773 unsigned int tag = 0; 02774 02781 for (n = 0; s->oformat->codec_tag[n]; n++) { 02782 avctag = s->oformat->codec_tag[n]; 02783 while (avctag->id != CODEC_ID_NONE) { 02784 if (ff_toupper4(avctag->tag) == ff_toupper4(st->codec->codec_tag)) { 02785 id = avctag->id; 02786 if (id == st->codec->codec_id) 02787 return 1; 02788 } 02789 if (avctag->id == st->codec->codec_id) 02790 tag = avctag->tag; 02791 avctag++; 02792 } 02793 } 02794 if (id != CODEC_ID_NONE) 02795 return 0; 02796 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL)) 02797 return 0; 02798 return 1; 02799 } 02800 02801 #if FF_API_FORMAT_PARAMETERS 02802 int av_write_header(AVFormatContext *s) 02803 { 02804 return avformat_write_header(s, NULL); 02805 } 02806 #endif 02807 02808 int avformat_write_header(AVFormatContext *s, AVDictionary **options) 02809 { 02810 int ret = 0, i; 02811 AVStream *st; 02812 AVDictionary *tmp = NULL; 02813 02814 if (options) 02815 av_dict_copy(&tmp, *options, 0); 02816 if ((ret = av_opt_set_dict(s, &tmp)) < 0) 02817 goto fail; 02818 02819 // some sanity checks 02820 if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) { 02821 av_log(s, AV_LOG_ERROR, "no streams\n"); 02822 ret = AVERROR(EINVAL); 02823 goto fail; 02824 } 02825 02826 for(i=0;i<s->nb_streams;i++) { 02827 st = s->streams[i]; 02828 02829 switch (st->codec->codec_type) { 02830 case AVMEDIA_TYPE_AUDIO: 02831 if(st->codec->sample_rate<=0){ 02832 av_log(s, AV_LOG_ERROR, "sample rate not set\n"); 02833 ret = AVERROR(EINVAL); 02834 goto fail; 02835 } 02836 if(!st->codec->block_align) 02837 st->codec->block_align = st->codec->channels * 02838 av_get_bits_per_sample(st->codec->codec_id) >> 3; 02839 break; 02840 case AVMEDIA_TYPE_VIDEO: 02841 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too? 
02842 av_log(s, AV_LOG_ERROR, "time base not set\n"); 02843 ret = AVERROR(EINVAL); 02844 goto fail; 02845 } 02846 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){ 02847 av_log(s, AV_LOG_ERROR, "dimensions not set\n"); 02848 ret = AVERROR(EINVAL); 02849 goto fail; 02850 } 02851 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){ 02852 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n"); 02853 ret = AVERROR(EINVAL); 02854 goto fail; 02855 } 02856 break; 02857 } 02858 02859 if(s->oformat->codec_tag){ 02860 if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){ 02861 //the current rawvideo encoding system ends up setting the wrong codec_tag for avi, we override it here 02862 st->codec->codec_tag= 0; 02863 } 02864 if(st->codec->codec_tag){ 02865 if (!validate_codec_tag(s, st)) { 02866 char tagbuf[32]; 02867 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag); 02868 av_log(s, AV_LOG_ERROR, 02869 "Tag %s/0x%08x incompatible with output codec id '%d'\n", 02870 tagbuf, st->codec->codec_tag, st->codec->codec_id); 02871 ret = AVERROR_INVALIDDATA; 02872 goto fail; 02873 } 02874 }else 02875 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id); 02876 } 02877 02878 if(s->oformat->flags & AVFMT_GLOBALHEADER && 02879 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER)) 02880 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i); 02881 } 02882 02883 if (!s->priv_data && s->oformat->priv_data_size > 0) { 02884 s->priv_data = av_mallocz(s->oformat->priv_data_size); 02885 if (!s->priv_data) { 02886 ret = AVERROR(ENOMEM); 02887 goto fail; 02888 } 02889 if (s->oformat->priv_class) { 02890 *(const AVClass**)s->priv_data= s->oformat->priv_class; 02891 av_opt_set_defaults(s->priv_data); 02892 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0) 02893 goto fail; 02894 } 02895 } 02896 02897 /* set muxer identification string */ 02898 if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) { 02899 av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0); 02900 } 02901 02902 if(s->oformat->write_header){ 02903 ret = s->oformat->write_header(s); 02904 if (ret < 0) 02905 goto fail; 02906 } 02907 02908 /* init PTS generation */ 02909 for(i=0;i<s->nb_streams;i++) { 02910 int64_t den = AV_NOPTS_VALUE; 02911 st = s->streams[i]; 02912 02913 switch (st->codec->codec_type) { 02914 case AVMEDIA_TYPE_AUDIO: 02915 den = (int64_t)st->time_base.num * st->codec->sample_rate; 02916 break; 02917 case AVMEDIA_TYPE_VIDEO: 02918 den = (int64_t)st->time_base.num * st->codec->time_base.den; 02919 break; 02920 default: 02921 break; 02922 } 02923 if (den != AV_NOPTS_VALUE) { 02924 if (den <= 0) { 02925 ret = AVERROR_INVALIDDATA; 02926 goto fail; 02927 } 02928 av_frac_init(&st->pts, 0, 0, den); 02929 } 02930 } 02931 02932 if (options) { 02933 av_dict_free(options); 02934 *options = tmp; 02935 } 02936 return 0; 02937 fail: 02938 av_dict_free(&tmp); 02939 return ret; 02940 } 02941 02942 //FIXME merge with compute_pkt_fields 02943 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){ 02944 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames); 02945 int num, den, frame_size, i; 02946 02947 av_dlog(s, "compute_pkt_fields2: pts:%"PRId64" dts:%"PRId64" 
cur_dts:%"PRId64" b:%d size:%d st:%d\n", 02948 pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index); 02949 02950 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE) 02951 return AVERROR(EINVAL);*/ 02952 02953 /* duration field */ 02954 if (pkt->duration == 0) { 02955 compute_frame_duration(&num, &den, st, NULL, pkt); 02956 if (den && num) { 02957 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num); 02958 } 02959 } 02960 02961 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0) 02962 pkt->pts= pkt->dts; 02963 02964 //XXX/FIXME this is a temporary hack until all encoders output pts 02965 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){ 02966 pkt->dts= 02967 // pkt->pts= st->cur_dts; 02968 pkt->pts= st->pts.val; 02969 } 02970 02971 //calculate dts from pts 02972 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){ 02973 st->pts_buffer[0]= pkt->pts; 02974 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++) 02975 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration; 02976 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++) 02977 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]); 02978 02979 pkt->dts= st->pts_buffer[0]; 02980 } 02981 02982 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){ 02983 av_log(s, AV_LOG_ERROR, 02984 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %"PRId64" >= %"PRId64"\n", 02985 st->index, st->cur_dts, pkt->dts); 02986 return AVERROR(EINVAL); 02987 } 02988 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){ 02989 av_log(s, AV_LOG_ERROR, "pts < dts in stream %d\n", st->index); 02990 return AVERROR(EINVAL); 02991 } 02992 02993 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts); 02994 st->cur_dts= pkt->dts; 02995 st->pts.val= pkt->dts; 02996 02997 /* update pts */ 02998 switch (st->codec->codec_type) { 02999 case AVMEDIA_TYPE_AUDIO: 03000 frame_size = get_audio_frame_size(st->codec, pkt->size); 03001 03002 /* HACK/FIXME, we skip the initial 0 size packets as they are most 03003 likely equal to the encoder delay, but it would be better if we 03004 had the real timestamps from the encoder */ 03005 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) { 03006 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size); 03007 } 03008 break; 03009 case AVMEDIA_TYPE_VIDEO: 03010 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num); 03011 break; 03012 default: 03013 break; 03014 } 03015 return 0; 03016 } 03017 03018 int av_write_frame(AVFormatContext *s, AVPacket *pkt) 03019 { 03020 int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt); 03021 03022 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS)) 03023 return ret; 03024 03025 ret= s->oformat->write_packet(s, pkt); 03026 return ret; 03027 } 03028 03029 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt, 03030 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *)) 03031 { 03032 AVPacketList **next_point, *this_pktl; 03033 03034 this_pktl = av_mallocz(sizeof(AVPacketList)); 03035 this_pktl->pkt= *pkt; 03036 pkt->destruct= NULL; // do not free original but only the copy 03037 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-alloced memory 
03038 03039 if(s->streams[pkt->stream_index]->last_in_packet_buffer){ 03040 next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next); 03041 }else 03042 next_point = &s->packet_buffer; 03043 03044 if(*next_point){ 03045 if(compare(s, &s->packet_buffer_end->pkt, pkt)){ 03046 while(!compare(s, &(*next_point)->pkt, pkt)){ 03047 next_point= &(*next_point)->next; 03048 } 03049 goto next_non_null; 03050 }else{ 03051 next_point = &(s->packet_buffer_end->next); 03052 } 03053 } 03054 assert(!*next_point); 03055 03056 s->packet_buffer_end= this_pktl; 03057 next_non_null: 03058 03059 this_pktl->next= *next_point; 03060 03061 s->streams[pkt->stream_index]->last_in_packet_buffer= 03062 *next_point= this_pktl; 03063 } 03064 03065 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt) 03066 { 03067 AVStream *st = s->streams[ pkt ->stream_index]; 03068 AVStream *st2= s->streams[ next->stream_index]; 03069 int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts, 03070 st->time_base); 03071 03072 if (comp == 0) 03073 return pkt->stream_index < next->stream_index; 03074 return comp > 0; 03075 } 03076 03077 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){ 03078 AVPacketList *pktl; 03079 int stream_count=0; 03080 int i; 03081 03082 if(pkt){ 03083 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts); 03084 } 03085 03086 for(i=0; i < s->nb_streams; i++) 03087 stream_count+= !!s->streams[i]->last_in_packet_buffer; 03088 03089 if(stream_count && (s->nb_streams == stream_count || flush)){ 03090 pktl= s->packet_buffer; 03091 *out= pktl->pkt; 03092 03093 s->packet_buffer= pktl->next; 03094 if(!s->packet_buffer) 03095 s->packet_buffer_end= NULL; 03096 03097 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl) 03098 s->streams[out->stream_index]->last_in_packet_buffer= NULL; 03099 av_freep(&pktl); 03100 return 1; 03101 }else{ 03102 av_init_packet(out); 03103 return 0; 03104 } 03105 } 03106 03116 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){ 03117 if(s->oformat->interleave_packet) 03118 return s->oformat->interleave_packet(s, out, in, flush); 03119 else 03120 return av_interleave_packet_per_dts(s, out, in, flush); 03121 } 03122 03123 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){ 03124 AVStream *st= s->streams[ pkt->stream_index]; 03125 int ret; 03126 03127 //FIXME/XXX/HACK drop zero sized packets 03128 if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0) 03129 return 0; 03130 03131 av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n", 03132 pkt->size, pkt->dts, pkt->pts); 03133 if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS)) 03134 return ret; 03135 03136 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS)) 03137 return AVERROR(EINVAL); 03138 03139 for(;;){ 03140 AVPacket opkt; 03141 int ret= av_interleave_packet(s, &opkt, pkt, 0); 03142 if(ret<=0) //FIXME cleanup needed for ret<0 ? 03143 return ret; 03144 03145 ret= s->oformat->write_packet(s, &opkt); 03146 03147 av_free_packet(&opkt); 03148 pkt= NULL; 03149 03150 if(ret<0) 03151 return ret; 03152 } 03153 } 03154 03155 int av_write_trailer(AVFormatContext *s) 03156 { 03157 int ret, i; 03158 03159 for(;;){ 03160 AVPacket pkt; 03161 ret= av_interleave_packet(s, &pkt, NULL, 1); 03162 if(ret<0) //FIXME cleanup needed for ret<0 ? 
03163 goto fail; 03164 if(!ret) 03165 break; 03166 03167 ret= s->oformat->write_packet(s, &pkt); 03168 03169 av_free_packet(&pkt); 03170 03171 if(ret<0) 03172 goto fail; 03173 } 03174 03175 if(s->oformat->write_trailer) 03176 ret = s->oformat->write_trailer(s); 03177 fail: 03178 for(i=0;i<s->nb_streams;i++) { 03179 av_freep(&s->streams[i]->priv_data); 03180 av_freep(&s->streams[i]->index_entries); 03181 } 03182 if (s->iformat && s->iformat->priv_class) 03183 av_opt_free(s->priv_data); 03184 av_freep(&s->priv_data); 03185 return ret; 03186 } 03187 03188 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx) 03189 { 03190 int i, j; 03191 AVProgram *program=NULL; 03192 void *tmp; 03193 03194 if (idx >= ac->nb_streams) { 03195 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx); 03196 return; 03197 } 03198 03199 for(i=0; i<ac->nb_programs; i++){ 03200 if(ac->programs[i]->id != progid) 03201 continue; 03202 program = ac->programs[i]; 03203 for(j=0; j<program->nb_stream_indexes; j++) 03204 if(program->stream_index[j] == idx) 03205 return; 03206 03207 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1)); 03208 if(!tmp) 03209 return; 03210 program->stream_index = tmp; 03211 program->stream_index[program->nb_stream_indexes++] = idx; 03212 return; 03213 } 03214 } 03215 03216 static void print_fps(double d, const char *postfix){ 03217 uint64_t v= lrintf(d*100); 03218 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix); 03219 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix); 03220 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix); 03221 } 03222 03223 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent) 03224 { 03225 if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){ 03226 AVDictionaryEntry *tag=NULL; 03227 03228 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent); 03229 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) { 03230 if(strcmp("language", tag->key)) 03231 av_log(ctx, AV_LOG_INFO, "%s %-16s: %s\n", indent, tag->key, tag->value); 03232 } 03233 } 03234 } 03235 03236 /* "user interface" functions */ 03237 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output) 03238 { 03239 char buf[256]; 03240 int flags = (is_output ? 
ic->oformat->flags : ic->iformat->flags); 03241 AVStream *st = ic->streams[i]; 03242 int g = av_gcd(st->time_base.num, st->time_base.den); 03243 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0); 03244 avcodec_string(buf, sizeof(buf), st->codec, is_output); 03245 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i); 03246 /* the pid is an important information, so we display it */ 03247 /* XXX: add a generic system */ 03248 if (flags & AVFMT_SHOW_IDS) 03249 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id); 03250 if (lang) 03251 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value); 03252 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g); 03253 av_log(NULL, AV_LOG_INFO, ": %s", buf); 03254 if (st->sample_aspect_ratio.num && // default 03255 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) { 03256 AVRational display_aspect_ratio; 03257 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, 03258 st->codec->width*st->sample_aspect_ratio.num, 03259 st->codec->height*st->sample_aspect_ratio.den, 03260 1024*1024); 03261 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d", 03262 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den, 03263 display_aspect_ratio.num, display_aspect_ratio.den); 03264 } 03265 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){ 03266 if(st->avg_frame_rate.den && st->avg_frame_rate.num) 03267 print_fps(av_q2d(st->avg_frame_rate), "fps"); 03268 if(st->r_frame_rate.den && st->r_frame_rate.num) 03269 print_fps(av_q2d(st->r_frame_rate), "tbr"); 03270 if(st->time_base.den && st->time_base.num) 03271 print_fps(1/av_q2d(st->time_base), "tbn"); 03272 if(st->codec->time_base.den && st->codec->time_base.num) 03273 print_fps(1/av_q2d(st->codec->time_base), "tbc"); 03274 } 03275 if (st->disposition & AV_DISPOSITION_DEFAULT) 03276 av_log(NULL, AV_LOG_INFO, " (default)"); 03277 if (st->disposition & AV_DISPOSITION_DUB) 03278 av_log(NULL, AV_LOG_INFO, " (dub)"); 03279 if (st->disposition & AV_DISPOSITION_ORIGINAL) 03280 av_log(NULL, AV_LOG_INFO, " (original)"); 03281 if (st->disposition & AV_DISPOSITION_COMMENT) 03282 av_log(NULL, AV_LOG_INFO, " (comment)"); 03283 if (st->disposition & AV_DISPOSITION_LYRICS) 03284 av_log(NULL, AV_LOG_INFO, " (lyrics)"); 03285 if (st->disposition & AV_DISPOSITION_KARAOKE) 03286 av_log(NULL, AV_LOG_INFO, " (karaoke)"); 03287 if (st->disposition & AV_DISPOSITION_FORCED) 03288 av_log(NULL, AV_LOG_INFO, " (forced)"); 03289 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED) 03290 av_log(NULL, AV_LOG_INFO, " (hearing impaired)"); 03291 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED) 03292 av_log(NULL, AV_LOG_INFO, " (visual impaired)"); 03293 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS) 03294 av_log(NULL, AV_LOG_INFO, " (clean effects)"); 03295 av_log(NULL, AV_LOG_INFO, "\n"); 03296 dump_metadata(NULL, st->metadata, " "); 03297 } 03298 03299 #if FF_API_DUMP_FORMAT 03300 void dump_format(AVFormatContext *ic, 03301 int index, 03302 const char *url, 03303 int is_output) 03304 { 03305 av_dump_format(ic, index, url, is_output); 03306 } 03307 #endif 03308 03309 void av_dump_format(AVFormatContext *ic, 03310 int index, 03311 const char *url, 03312 int is_output) 03313 { 03314 int i; 03315 uint8_t *printed = av_mallocz(ic->nb_streams); 03316 if (ic->nb_streams && !printed) 03317 return; 03318 03319 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n", 03320 is_output ? "Output" : "Input", 03321 index, 03322 is_output ? 
ic->oformat->name : ic->iformat->name, 03323 is_output ? "to" : "from", url); 03324 dump_metadata(NULL, ic->metadata, " "); 03325 if (!is_output) { 03326 av_log(NULL, AV_LOG_INFO, " Duration: "); 03327 if (ic->duration != AV_NOPTS_VALUE) { 03328 int hours, mins, secs, us; 03329 secs = ic->duration / AV_TIME_BASE; 03330 us = ic->duration % AV_TIME_BASE; 03331 mins = secs / 60; 03332 secs %= 60; 03333 hours = mins / 60; 03334 mins %= 60; 03335 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs, 03336 (100 * us) / AV_TIME_BASE); 03337 } else { 03338 av_log(NULL, AV_LOG_INFO, "N/A"); 03339 } 03340 if (ic->start_time != AV_NOPTS_VALUE) { 03341 int secs, us; 03342 av_log(NULL, AV_LOG_INFO, ", start: "); 03343 secs = ic->start_time / AV_TIME_BASE; 03344 us = abs(ic->start_time % AV_TIME_BASE); 03345 av_log(NULL, AV_LOG_INFO, "%d.%06d", 03346 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE)); 03347 } 03348 av_log(NULL, AV_LOG_INFO, ", bitrate: "); 03349 if (ic->bit_rate) { 03350 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000); 03351 } else { 03352 av_log(NULL, AV_LOG_INFO, "N/A"); 03353 } 03354 av_log(NULL, AV_LOG_INFO, "\n"); 03355 } 03356 for (i = 0; i < ic->nb_chapters; i++) { 03357 AVChapter *ch = ic->chapters[i]; 03358 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i); 03359 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base)); 03360 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base)); 03361 03362 dump_metadata(NULL, ch->metadata, " "); 03363 } 03364 if(ic->nb_programs) { 03365 int j, k, total = 0; 03366 for(j=0; j<ic->nb_programs; j++) { 03367 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata, 03368 "name", NULL, 0); 03369 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id, 03370 name ? 
name->value : ""); 03371 dump_metadata(NULL, ic->programs[j]->metadata, " "); 03372 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) { 03373 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output); 03374 printed[ic->programs[j]->stream_index[k]] = 1; 03375 } 03376 total += ic->programs[j]->nb_stream_indexes; 03377 } 03378 if (total < ic->nb_streams) 03379 av_log(NULL, AV_LOG_INFO, " No Program\n"); 03380 } 03381 for(i=0;i<ic->nb_streams;i++) 03382 if (!printed[i]) 03383 dump_stream_format(ic, i, index, is_output); 03384 03385 av_free(printed); 03386 } 03387 03388 int64_t av_gettime(void) 03389 { 03390 struct timeval tv; 03391 gettimeofday(&tv,NULL); 03392 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec; 03393 } 03394 03395 uint64_t ff_ntp_time(void) 03396 { 03397 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US; 03398 } 03399 03400 #if FF_API_PARSE_DATE 03401 #include "libavutil/parseutils.h" 03402 03403 int64_t parse_date(const char *timestr, int duration) 03404 { 03405 int64_t timeval; 03406 av_parse_time(&timeval, timestr, duration); 03407 return timeval; 03408 } 03409 #endif 03410 03411 #if FF_API_FIND_INFO_TAG 03412 #include "libavutil/parseutils.h" 03413 03414 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info) 03415 { 03416 return av_find_info_tag(arg, arg_size, tag1, info); 03417 } 03418 #endif 03419 03420 int av_get_frame_filename(char *buf, int buf_size, 03421 const char *path, int number) 03422 { 03423 const char *p; 03424 char *q, buf1[20], c; 03425 int nd, len, percentd_found; 03426 03427 q = buf; 03428 p = path; 03429 percentd_found = 0; 03430 for(;;) { 03431 c = *p++; 03432 if (c == '\0') 03433 break; 03434 if (c == '%') { 03435 do { 03436 nd = 0; 03437 while (isdigit(*p)) { 03438 nd = nd * 10 + *p++ - '0'; 03439 } 03440 c = *p++; 03441 } while (isdigit(c)); 03442 03443 switch(c) { 03444 case '%': 03445 goto addchar; 03446 case 'd': 03447 if (percentd_found) 03448 goto fail; 03449 percentd_found = 1; 03450 snprintf(buf1, sizeof(buf1), "%0*d", nd, number); 03451 len = strlen(buf1); 03452 if ((q - buf + len) > buf_size - 1) 03453 goto fail; 03454 memcpy(q, buf1, len); 03455 q += len; 03456 break; 03457 default: 03458 goto fail; 03459 } 03460 } else { 03461 addchar: 03462 if ((q - buf) < buf_size - 1) 03463 *q++ = c; 03464 } 03465 } 03466 if (!percentd_found) 03467 goto fail; 03468 *q = '\0'; 03469 return 0; 03470 fail: 03471 *q = '\0'; 03472 return -1; 03473 } 03474 03475 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size) 03476 { 03477 int len, i, j, c; 03478 #undef fprintf 03479 #define PRINT(...) 
do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0) 03480 03481 for(i=0;i<size;i+=16) { 03482 len = size - i; 03483 if (len > 16) 03484 len = 16; 03485 PRINT("%08x ", i); 03486 for(j=0;j<16;j++) { 03487 if (j < len) 03488 PRINT(" %02x", buf[i+j]); 03489 else 03490 PRINT(" "); 03491 } 03492 PRINT(" "); 03493 for(j=0;j<len;j++) { 03494 c = buf[i+j]; 03495 if (c < ' ' || c > '~') 03496 c = '.'; 03497 PRINT("%c", c); 03498 } 03499 PRINT("\n"); 03500 } 03501 #undef PRINT 03502 } 03503 03504 void av_hex_dump(FILE *f, uint8_t *buf, int size) 03505 { 03506 hex_dump_internal(NULL, f, 0, buf, size); 03507 } 03508 03509 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size) 03510 { 03511 hex_dump_internal(avcl, NULL, level, buf, size); 03512 } 03513 03514 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base) 03515 { 03516 #undef fprintf 03517 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0) 03518 PRINT("stream #%d:\n", pkt->stream_index); 03519 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0)); 03520 PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base)); 03521 /* DTS is _always_ valid after av_read_frame() */ 03522 PRINT(" dts="); 03523 if (pkt->dts == AV_NOPTS_VALUE) 03524 PRINT("N/A"); 03525 else 03526 PRINT("%0.3f", pkt->dts * av_q2d(time_base)); 03527 /* PTS may not be known if B-frames are present. */ 03528 PRINT(" pts="); 03529 if (pkt->pts == AV_NOPTS_VALUE) 03530 PRINT("N/A"); 03531 else 03532 PRINT("%0.3f", pkt->pts * av_q2d(time_base)); 03533 PRINT("\n"); 03534 PRINT(" size=%d\n", pkt->size); 03535 #undef PRINT 03536 if (dump_payload) 03537 av_hex_dump(f, pkt->data, pkt->size); 03538 } 03539 03540 #if FF_API_PKT_DUMP 03541 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload) 03542 { 03543 AVRational tb = { 1, AV_TIME_BASE }; 03544 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, tb); 03545 } 03546 #endif 03547 03548 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st) 03549 { 03550 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base); 03551 } 03552 03553 #if FF_API_PKT_DUMP 03554 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload) 03555 { 03556 AVRational tb = { 1, AV_TIME_BASE }; 03557 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, tb); 03558 } 03559 #endif 03560 03561 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload, 03562 AVStream *st) 03563 { 03564 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base); 03565 } 03566 03567 void av_url_split(char *proto, int proto_size, 03568 char *authorization, int authorization_size, 03569 char *hostname, int hostname_size, 03570 int *port_ptr, 03571 char *path, int path_size, 03572 const char *url) 03573 { 03574 const char *p, *ls, *at, *col, *brk; 03575 03576 if (port_ptr) *port_ptr = -1; 03577 if (proto_size > 0) proto[0] = 0; 03578 if (authorization_size > 0) authorization[0] = 0; 03579 if (hostname_size > 0) hostname[0] = 0; 03580 if (path_size > 0) path[0] = 0; 03581 03582 /* parse protocol */ 03583 if ((p = strchr(url, ':'))) { 03584 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url)); 03585 p++; /* skip ':' */ 03586 if (*p == '/') p++; 03587 if (*p == '/') p++; 03588 } else { 03589 /* no protocol means plain filename */ 03590 av_strlcpy(path, url, path_size); 03591 return; 03592 } 03593 03594 /* separate path from hostname */ 03595 ls = 
strchr(p, '/'); 03596 if(!ls) 03597 ls = strchr(p, '?'); 03598 if(ls) 03599 av_strlcpy(path, ls, path_size); 03600 else 03601 ls = &p[strlen(p)]; // XXX 03602 03603 /* the rest is hostname, use that to parse auth/port */ 03604 if (ls != p) { 03605 /* authorization (user[:pass]@hostname) */ 03606 if ((at = strchr(p, '@')) && at < ls) { 03607 av_strlcpy(authorization, p, 03608 FFMIN(authorization_size, at + 1 - p)); 03609 p = at + 1; /* skip '@' */ 03610 } 03611 03612 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) { 03613 /* [host]:port */ 03614 av_strlcpy(hostname, p + 1, 03615 FFMIN(hostname_size, brk - p)); 03616 if (brk[1] == ':' && port_ptr) 03617 *port_ptr = atoi(brk + 2); 03618 } else if ((col = strchr(p, ':')) && col < ls) { 03619 av_strlcpy(hostname, p, 03620 FFMIN(col + 1 - p, hostname_size)); 03621 if (port_ptr) *port_ptr = atoi(col + 1); 03622 } else 03623 av_strlcpy(hostname, p, 03624 FFMIN(ls + 1 - p, hostname_size)); 03625 } 03626 } 03627 03628 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase) 03629 { 03630 int i; 03631 static const char hex_table_uc[16] = { '0', '1', '2', '3', 03632 '4', '5', '6', '7', 03633 '8', '9', 'A', 'B', 03634 'C', 'D', 'E', 'F' }; 03635 static const char hex_table_lc[16] = { '0', '1', '2', '3', 03636 '4', '5', '6', '7', 03637 '8', '9', 'a', 'b', 03638 'c', 'd', 'e', 'f' }; 03639 const char *hex_table = lowercase ? hex_table_lc : hex_table_uc; 03640 03641 for(i = 0; i < s; i++) { 03642 buff[i * 2] = hex_table[src[i] >> 4]; 03643 buff[i * 2 + 1] = hex_table[src[i] & 0xF]; 03644 } 03645 03646 return buff; 03647 } 03648 03649 int ff_hex_to_data(uint8_t *data, const char *p) 03650 { 03651 int c, len, v; 03652 03653 len = 0; 03654 v = 1; 03655 for (;;) { 03656 p += strspn(p, SPACE_CHARS); 03657 if (*p == '\0') 03658 break; 03659 c = toupper((unsigned char) *p++); 03660 if (c >= '0' && c <= '9') 03661 c = c - '0'; 03662 else if (c >= 'A' && c <= 'F') 03663 c = c - 'A' + 10; 03664 else 03665 break; 03666 v = (v << 4) | c; 03667 if (v & 0x100) { 03668 if (data) 03669 data[len] = v; 03670 len++; 03671 v = 1; 03672 } 03673 } 03674 return len; 03675 } 03676 03677 void av_set_pts_info(AVStream *s, int pts_wrap_bits, 03678 unsigned int pts_num, unsigned int pts_den) 03679 { 03680 AVRational new_tb; 03681 if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){ 03682 if(new_tb.num != pts_num) 03683 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num); 03684 }else 03685 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index); 03686 03687 if(new_tb.num <= 0 || new_tb.den <= 0) { 03688 av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase for st:%d\n", s->index); 03689 return; 03690 } 03691 s->time_base = new_tb; 03692 s->pts_wrap_bits = pts_wrap_bits; 03693 } 03694 03695 int ff_url_join(char *str, int size, const char *proto, 03696 const char *authorization, const char *hostname, 03697 int port, const char *fmt, ...) 03698 { 03699 #if CONFIG_NETWORK 03700 struct addrinfo hints, *ai; 03701 #endif 03702 03703 str[0] = '\0'; 03704 if (proto) 03705 av_strlcatf(str, size, "%s://", proto); 03706 if (authorization && authorization[0]) 03707 av_strlcatf(str, size, "%s@", authorization); 03708 #if CONFIG_NETWORK && defined(AF_INET6) 03709 /* Determine if hostname is a numerical IPv6 address, 03710 * properly escape it within [] in that case. 
int ff_url_join(char *str, int size, const char *proto,
                const char *authorization, const char *hostname,
                int port, const char *fmt, ...)
{
#if CONFIG_NETWORK
    struct addrinfo hints, *ai;
#endif

    str[0] = '\0';
    if (proto)
        av_strlcatf(str, size, "%s://", proto);
    if (authorization && authorization[0])
        av_strlcatf(str, size, "%s@", authorization);
#if CONFIG_NETWORK && defined(AF_INET6)
    /* Determine if hostname is a numerical IPv6 address,
     * properly escape it within [] in that case. */
    memset(&hints, 0, sizeof(hints));
    hints.ai_flags = AI_NUMERICHOST;
    if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
        if (ai->ai_family == AF_INET6) {
            av_strlcat(str, "[", size);
            av_strlcat(str, hostname, size);
            av_strlcat(str, "]", size);
        } else {
            av_strlcat(str, hostname, size);
        }
        freeaddrinfo(ai);
    } else
#endif
        /* Not an IPv6 address, just output the plain string. */
        av_strlcat(str, hostname, size);

    if (port >= 0)
        av_strlcatf(str, size, ":%d", port);
    if (fmt) {
        va_list vl;
        int len = strlen(str);

        va_start(vl, fmt);
        vsnprintf(str + len, size > len ? size - len : 0, fmt, vl);
        va_end(vl);
    }
    return strlen(str);
}

int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
                     AVFormatContext *src)
{
    AVPacket local_pkt;

    local_pkt = *pkt;
    local_pkt.stream_index = dst_stream;
    if (pkt->pts != AV_NOPTS_VALUE)
        local_pkt.pts = av_rescale_q(pkt->pts,
                                     src->streams[pkt->stream_index]->time_base,
                                     dst->streams[dst_stream]->time_base);
    if (pkt->dts != AV_NOPTS_VALUE)
        local_pkt.dts = av_rescale_q(pkt->dts,
                                     src->streams[pkt->stream_index]->time_base,
                                     dst->streams[dst_stream]->time_base);
    return av_write_frame(dst, &local_pkt);
}

void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
                        void *context)
{
    const char *ptr = str;

    /* Parse key=value pairs. */
    for (;;) {
        const char *key;
        char *dest = NULL, *dest_end;
        int key_len, dest_len = 0;

        /* Skip whitespace and potential commas. */
        while (*ptr && (isspace(*ptr) || *ptr == ','))
            ptr++;
        if (!*ptr)
            break;

        key = ptr;

        if (!(ptr = strchr(key, '=')))
            break;
        ptr++;
        key_len = ptr - key;

        callback_get_buf(context, key, key_len, &dest, &dest_len);
        dest_end = dest + dest_len - 1;

        if (*ptr == '\"') {
            ptr++;
            while (*ptr && *ptr != '\"') {
                if (*ptr == '\\') {
                    if (!ptr[1])
                        break;
                    if (dest && dest < dest_end)
                        *dest++ = ptr[1];
                    ptr += 2;
                } else {
                    if (dest && dest < dest_end)
                        *dest++ = *ptr;
                    ptr++;
                }
            }
            if (*ptr == '\"')
                ptr++;
        } else {
            for (; *ptr && !(isspace(*ptr) || *ptr == ','); ptr++)
                if (dest && dest < dest_end)
                    *dest++ = *ptr;
        }
        if (dest)
            *dest = 0;
    }
}

int ff_find_stream_index(AVFormatContext *s, int id)
{
    int i;
    for (i = 0; i < s->nb_streams; i++) {
        if (s->streams[i]->id == id)
            return i;
    }
    return -1;
}
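/*
 * Illustrative usage sketch, not part of utils.c: ff_parse_key_value() is an
 * internal helper (declared in internal.h) used e.g. by the HTTP
 * authentication code to parse 'key="quoted value", key2=value2' strings.
 * The callback selects a destination buffer per key; the struct and function
 * names below are hypothetical.  Note that key_len includes the trailing '='.
 */
#include <string.h>
#include "internal.h"

struct demo_fields {
    char realm[64];
    char nonce[64];
};

static void demo_get_buf(void *ctx, const char *key, int key_len,
                         char **dest, int *dest_len)
{
    struct demo_fields *f = ctx;

    if (!strncmp(key, "realm=", key_len)) {
        *dest     = f->realm;
        *dest_len = sizeof(f->realm);
    } else if (!strncmp(key, "nonce=", key_len)) {
        *dest     = f->nonce;
        *dest_len = sizeof(f->nonce);
    }
}

static void demo_parse(struct demo_fields *f)
{
    memset(f, 0, sizeof(*f));
    ff_parse_key_value("realm=\"libav\", nonce=abc123", demo_get_buf, f);
    /* now f->realm == "libav" and f->nonce == "abc123" */
}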
void ff_make_absolute_url(char *buf, int size, const char *base,
                          const char *rel)
{
    char *sep;
    /* Absolute path, relative to the current server */
    if (base && strstr(base, "://") && rel[0] == '/') {
        if (base != buf)
            av_strlcpy(buf, base, size);
        sep = strstr(buf, "://");
        if (sep) {
            sep += 3;
            sep = strchr(sep, '/');
            if (sep)
                *sep = '\0';
        }
        av_strlcat(buf, rel, size);
        return;
    }
    /* If rel actually is an absolute url, just copy it */
    if (!base || strstr(rel, "://") || rel[0] == '/') {
        av_strlcpy(buf, rel, size);
        return;
    }
    if (base != buf)
        av_strlcpy(buf, base, size);
    /* Remove the file name from the base url */
    sep = strrchr(buf, '/');
    if (sep)
        sep[1] = '\0';
    else
        buf[0] = '\0';
    while (av_strstart(rel, "../", NULL) && sep) {
        /* Remove the path delimiter at the end */
        sep[0] = '\0';
        sep = strrchr(buf, '/');
        /* If the next directory name to pop off is "..", break here */
        if (!strcmp(sep ? &sep[1] : buf, "..")) {
            /* Readd the slash we just removed */
            av_strlcat(buf, "/", size);
            break;
        }
        /* Cut off the directory name */
        if (sep)
            sep[1] = '\0';
        else
            buf[0] = '\0';
        rel += 3;
    }
    av_strlcat(buf, rel, size);
}
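/*
 * Illustrative usage sketch, not part of utils.c: resolving relative URLs
 * against a base URL with the internal ff_make_absolute_url() helper, as
 * e.g. HTTP-based playlist demuxers do for segment entries.  The URLs are
 * made up for the example, and the expected results follow from the logic
 * above.
 */
#include <stdio.h>
#include "internal.h"

static void resolve_example(void)
{
    char buf[256];

    /* A plain relative name replaces the last component of the base path:
     * expected result "http://example.com/live/segment42.ts" */
    ff_make_absolute_url(buf, sizeof(buf),
                         "http://example.com/live/playlist.m3u8",
                         "segment42.ts");
    printf("%s\n", buf);

    /* "../" components pop directories off the base path:
     * expected result "http://example.com/live/audio/segment42.ts" */
    ff_make_absolute_url(buf, sizeof(buf),
                         "http://example.com/live/hd/playlist.m3u8",
                         "../audio/segment42.ts");
    printf("%s\n", buf);
}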