Libav 0.7.1
00001 /* 00002 * various utility functions for use within Libav 00003 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard 00004 * 00005 * This file is part of Libav. 00006 * 00007 * Libav is free software; you can redistribute it and/or 00008 * modify it under the terms of the GNU Lesser General Public 00009 * License as published by the Free Software Foundation; either 00010 * version 2.1 of the License, or (at your option) any later version. 00011 * 00012 * Libav is distributed in the hope that it will be useful, 00013 * but WITHOUT ANY WARRANTY; without even the implied warranty of 00014 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 00015 * Lesser General Public License for more details. 00016 * 00017 * You should have received a copy of the GNU Lesser General Public 00018 * License along with Libav; if not, write to the Free Software 00019 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 00020 */ 00021 00022 /* #define DEBUG */ 00023 00024 #include "avformat.h" 00025 #include "avio_internal.h" 00026 #include "internal.h" 00027 #include "libavcodec/internal.h" 00028 #include "libavutil/opt.h" 00029 #include "libavutil/dict.h" 00030 #include "libavutil/pixdesc.h" 00031 #include "metadata.h" 00032 #include "id3v2.h" 00033 #include "libavutil/avstring.h" 00034 #include "riff.h" 00035 #include "audiointerleave.h" 00036 #include "url.h" 00037 #include <sys/time.h> 00038 #include <time.h> 00039 #include <strings.h> 00040 #include <stdarg.h> 00041 #if CONFIG_NETWORK 00042 #include "network.h" 00043 #endif 00044 00045 #undef NDEBUG 00046 #include <assert.h> 00047 00053 unsigned avformat_version(void) 00054 { 00055 return LIBAVFORMAT_VERSION_INT; 00056 } 00057 00058 const char *avformat_configuration(void) 00059 { 00060 return LIBAV_CONFIGURATION; 00061 } 00062 00063 const char *avformat_license(void) 00064 { 00065 #define LICENSE_PREFIX "libavformat license: " 00066 return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1; 00067 } 00068 00069 /* fraction handling */ 00070 00081 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den) 00082 { 00083 num += (den >> 1); 00084 if (num >= den) { 00085 val += num / den; 00086 num = num % den; 00087 } 00088 f->val = val; 00089 f->num = num; 00090 f->den = den; 00091 } 00092 00099 static void av_frac_add(AVFrac *f, int64_t incr) 00100 { 00101 int64_t num, den; 00102 00103 num = f->num + incr; 00104 den = f->den; 00105 if (num < 0) { 00106 f->val += num / den; 00107 num = num % den; 00108 if (num < 0) { 00109 num += den; 00110 f->val--; 00111 } 00112 } else if (num >= den) { 00113 f->val += num / den; 00114 num = num % den; 00115 } 00116 f->num = num; 00117 } 00118 00120 static AVInputFormat *first_iformat = NULL; 00122 static AVOutputFormat *first_oformat = NULL; 00123 00124 AVInputFormat *av_iformat_next(AVInputFormat *f) 00125 { 00126 if(f) return f->next; 00127 else return first_iformat; 00128 } 00129 00130 AVOutputFormat *av_oformat_next(AVOutputFormat *f) 00131 { 00132 if(f) return f->next; 00133 else return first_oformat; 00134 } 00135 00136 void av_register_input_format(AVInputFormat *format) 00137 { 00138 AVInputFormat **p; 00139 p = &first_iformat; 00140 while (*p != NULL) p = &(*p)->next; 00141 *p = format; 00142 format->next = NULL; 00143 } 00144 00145 void av_register_output_format(AVOutputFormat *format) 00146 { 00147 AVOutputFormat **p; 00148 p = &first_oformat; 00149 while (*p != NULL) p = &(*p)->next; 00150 *p = format; 00151 format->next = NULL; 00152 } 00153 
00154 int av_match_ext(const char *filename, const char *extensions) 00155 { 00156 const char *ext, *p; 00157 char ext1[32], *q; 00158 00159 if(!filename) 00160 return 0; 00161 00162 ext = strrchr(filename, '.'); 00163 if (ext) { 00164 ext++; 00165 p = extensions; 00166 for(;;) { 00167 q = ext1; 00168 while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1) 00169 *q++ = *p++; 00170 *q = '\0'; 00171 if (!strcasecmp(ext1, ext)) 00172 return 1; 00173 if (*p == '\0') 00174 break; 00175 p++; 00176 } 00177 } 00178 return 0; 00179 } 00180 00181 static int match_format(const char *name, const char *names) 00182 { 00183 const char *p; 00184 int len, namelen; 00185 00186 if (!name || !names) 00187 return 0; 00188 00189 namelen = strlen(name); 00190 while ((p = strchr(names, ','))) { 00191 len = FFMAX(p - names, namelen); 00192 if (!strncasecmp(name, names, len)) 00193 return 1; 00194 names = p+1; 00195 } 00196 return !strcasecmp(name, names); 00197 } 00198 00199 AVOutputFormat *av_guess_format(const char *short_name, const char *filename, 00200 const char *mime_type) 00201 { 00202 AVOutputFormat *fmt = NULL, *fmt_found; 00203 int score_max, score; 00204 00205 /* specific test for image sequences */ 00206 #if CONFIG_IMAGE2_MUXER 00207 if (!short_name && filename && 00208 av_filename_number_test(filename) && 00209 ff_guess_image2_codec(filename) != CODEC_ID_NONE) { 00210 return av_guess_format("image2", NULL, NULL); 00211 } 00212 #endif 00213 /* Find the proper file type. */ 00214 fmt_found = NULL; 00215 score_max = 0; 00216 while ((fmt = av_oformat_next(fmt))) { 00217 score = 0; 00218 if (fmt->name && short_name && !strcmp(fmt->name, short_name)) 00219 score += 100; 00220 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type)) 00221 score += 10; 00222 if (filename && fmt->extensions && 00223 av_match_ext(filename, fmt->extensions)) { 00224 score += 5; 00225 } 00226 if (score > score_max) { 00227 score_max = score; 00228 fmt_found = fmt; 00229 } 00230 } 00231 return fmt_found; 00232 } 00233 00234 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name, 00235 const char *filename, const char *mime_type, enum AVMediaType type){ 00236 if(type == AVMEDIA_TYPE_VIDEO){ 00237 enum CodecID codec_id= CODEC_ID_NONE; 00238 00239 #if CONFIG_IMAGE2_MUXER 00240 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){ 00241 codec_id= ff_guess_image2_codec(filename); 00242 } 00243 #endif 00244 if(codec_id == CODEC_ID_NONE) 00245 codec_id= fmt->video_codec; 00246 return codec_id; 00247 }else if(type == AVMEDIA_TYPE_AUDIO) 00248 return fmt->audio_codec; 00249 else if (type == AVMEDIA_TYPE_SUBTITLE) 00250 return fmt->subtitle_codec; 00251 else 00252 return CODEC_ID_NONE; 00253 } 00254 00255 AVInputFormat *av_find_input_format(const char *short_name) 00256 { 00257 AVInputFormat *fmt = NULL; 00258 while ((fmt = av_iformat_next(fmt))) { 00259 if (match_format(short_name, fmt->name)) 00260 return fmt; 00261 } 00262 return NULL; 00263 } 00264 00265 00266 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size) 00267 { 00268 int ret= av_new_packet(pkt, size); 00269 00270 if(ret<0) 00271 return ret; 00272 00273 pkt->pos= avio_tell(s); 00274 00275 ret= avio_read(s, pkt->data, size); 00276 if(ret<=0) 00277 av_free_packet(pkt); 00278 else 00279 av_shrink_packet(pkt, ret); 00280 00281 return ret; 00282 } 00283 00284 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size) 00285 { 00286 int ret; 00287 int old_size; 00288 if (!pkt->size) 00289 return av_get_packet(s, pkt, size); 
00290 old_size = pkt->size; 00291 ret = av_grow_packet(pkt, size); 00292 if (ret < 0) 00293 return ret; 00294 ret = avio_read(s, pkt->data + old_size, size); 00295 av_shrink_packet(pkt, old_size + FFMAX(ret, 0)); 00296 return ret; 00297 } 00298 00299 00300 int av_filename_number_test(const char *filename) 00301 { 00302 char buf[1024]; 00303 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0); 00304 } 00305 00306 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max) 00307 { 00308 AVProbeData lpd = *pd; 00309 AVInputFormat *fmt1 = NULL, *fmt; 00310 int score, id3 = 0; 00311 00312 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) { 00313 int id3len = ff_id3v2_tag_len(lpd.buf); 00314 if (lpd.buf_size > id3len + 16) { 00315 lpd.buf += id3len; 00316 lpd.buf_size -= id3len; 00317 } 00318 id3 = 1; 00319 } 00320 00321 fmt = NULL; 00322 while ((fmt1 = av_iformat_next(fmt1))) { 00323 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE)) 00324 continue; 00325 score = 0; 00326 if (fmt1->read_probe) { 00327 score = fmt1->read_probe(&lpd); 00328 } else if (fmt1->extensions) { 00329 if (av_match_ext(lpd.filename, fmt1->extensions)) { 00330 score = 50; 00331 } 00332 } 00333 if (score > *score_max) { 00334 *score_max = score; 00335 fmt = fmt1; 00336 }else if (score == *score_max) 00337 fmt = NULL; 00338 } 00339 00340 /* a hack for files with huge id3v2 tags -- try to guess by file extension. */ 00341 if (!fmt && id3 && *score_max < AVPROBE_SCORE_MAX/4) { 00342 while ((fmt = av_iformat_next(fmt))) 00343 if (fmt->extensions && av_match_ext(lpd.filename, fmt->extensions)) { 00344 *score_max = AVPROBE_SCORE_MAX/4; 00345 break; 00346 } 00347 } 00348 00349 return fmt; 00350 } 00351 00352 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){ 00353 int score=0; 00354 return av_probe_input_format2(pd, is_opened, &score); 00355 } 00356 00357 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score) 00358 { 00359 static const struct { 00360 const char *name; enum CodecID id; enum AVMediaType type; 00361 } fmt_id_type[] = { 00362 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO }, 00363 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO }, 00364 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO }, 00365 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO }, 00366 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO }, 00367 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO }, 00368 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO }, 00369 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO }, 00370 { 0 } 00371 }; 00372 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score); 00373 00374 if (fmt) { 00375 int i; 00376 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n", 00377 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score); 00378 for (i = 0; fmt_id_type[i].name; i++) { 00379 if (!strcmp(fmt->name, fmt_id_type[i].name)) { 00380 st->codec->codec_id = fmt_id_type[i].id; 00381 st->codec->codec_type = fmt_id_type[i].type; 00382 break; 00383 } 00384 } 00385 } 00386 return !!fmt; 00387 } 00388 00389 /************************************************************/ 00390 /* input media file */ 00391 00392 #if FF_API_FORMAT_PARAMETERS 00393 static AVDictionary *convert_format_parameters(AVFormatParameters *ap) 00394 { 00395 char buf[1024]; 00396 AVDictionary *opts = NULL; 00397 00398 if (!ap) 00399 return NULL; 00400 00401 if (ap->time_base.num) { 00402 snprintf(buf, sizeof(buf), 
"%d/%d", ap->time_base.den, ap->time_base.num); 00403 av_dict_set(&opts, "framerate", buf, 0); 00404 } 00405 if (ap->sample_rate) { 00406 snprintf(buf, sizeof(buf), "%d", ap->sample_rate); 00407 av_dict_set(&opts, "sample_rate", buf, 0); 00408 } 00409 if (ap->channels) { 00410 snprintf(buf, sizeof(buf), "%d", ap->channels); 00411 av_dict_set(&opts, "channels", buf, 0); 00412 } 00413 if (ap->width || ap->height) { 00414 snprintf(buf, sizeof(buf), "%dx%d", ap->width, ap->height); 00415 av_dict_set(&opts, "video_size", buf, 0); 00416 } 00417 if (ap->pix_fmt != PIX_FMT_NONE) { 00418 av_dict_set(&opts, "pixel_format", av_get_pix_fmt_name(ap->pix_fmt), 0); 00419 } 00420 if (ap->channel) { 00421 snprintf(buf, sizeof(buf), "%d", ap->channel); 00422 av_dict_set(&opts, "channel", buf, 0); 00423 } 00424 if (ap->standard) { 00425 av_dict_set(&opts, "standard", ap->standard, 0); 00426 } 00427 if (ap->mpeg2ts_compute_pcr) { 00428 av_dict_set(&opts, "mpeg2ts_compute_pcr", "1", 0); 00429 } 00430 if (ap->initial_pause) { 00431 av_dict_set(&opts, "initial_pause", "1", 0); 00432 } 00433 return opts; 00434 } 00435 00439 int av_open_input_stream(AVFormatContext **ic_ptr, 00440 AVIOContext *pb, const char *filename, 00441 AVInputFormat *fmt, AVFormatParameters *ap) 00442 { 00443 int err; 00444 AVDictionary *opts; 00445 AVFormatContext *ic; 00446 AVFormatParameters default_ap; 00447 00448 if(!ap){ 00449 ap=&default_ap; 00450 memset(ap, 0, sizeof(default_ap)); 00451 } 00452 opts = convert_format_parameters(ap); 00453 00454 if(!ap->prealloced_context) 00455 ic = avformat_alloc_context(); 00456 else 00457 ic = *ic_ptr; 00458 if (!ic) { 00459 err = AVERROR(ENOMEM); 00460 goto fail; 00461 } 00462 if (pb && fmt && fmt->flags & AVFMT_NOFILE) 00463 av_log(ic, AV_LOG_WARNING, "Custom AVIOContext makes no sense and " 00464 "will be ignored with AVFMT_NOFILE format.\n"); 00465 else 00466 ic->pb = pb; 00467 00468 if ((err = avformat_open_input(&ic, filename, fmt, &opts)) < 0) 00469 goto fail; 00470 ic->pb = ic->pb ? ic->pb : pb; // don't leak custom pb if it wasn't set above 00471 00472 fail: 00473 *ic_ptr = ic; 00474 av_dict_free(&opts); 00475 return err; 00476 } 00477 #endif 00478 00480 #define PROBE_BUF_MIN 2048 00481 #define PROBE_BUF_MAX (1<<20) 00482 00483 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt, 00484 const char *filename, void *logctx, 00485 unsigned int offset, unsigned int max_probe_size) 00486 { 00487 AVProbeData pd = { filename ? filename : "", NULL, -offset }; 00488 unsigned char *buf = NULL; 00489 int ret = 0, probe_size; 00490 00491 if (!max_probe_size) { 00492 max_probe_size = PROBE_BUF_MAX; 00493 } else if (max_probe_size > PROBE_BUF_MAX) { 00494 max_probe_size = PROBE_BUF_MAX; 00495 } else if (max_probe_size < PROBE_BUF_MIN) { 00496 return AVERROR(EINVAL); 00497 } 00498 00499 if (offset >= max_probe_size) { 00500 return AVERROR(EINVAL); 00501 } 00502 00503 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt && ret >= 0; 00504 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) { 00505 int ret, score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0; 00506 int buf_offset = (probe_size == PROBE_BUF_MIN) ? 
0 : probe_size>>1; 00507 00508 if (probe_size < offset) { 00509 continue; 00510 } 00511 00512 /* read probe data */ 00513 buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE); 00514 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) { 00515 /* fail if error was not end of file, otherwise, lower score */ 00516 if (ret != AVERROR_EOF) { 00517 av_free(buf); 00518 return ret; 00519 } 00520 score = 0; 00521 ret = 0; /* error was end of file, nothing read */ 00522 } 00523 pd.buf_size += ret; 00524 pd.buf = &buf[offset]; 00525 00526 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE); 00527 00528 /* guess file format */ 00529 *fmt = av_probe_input_format2(&pd, 1, &score); 00530 if(*fmt){ 00531 if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration 00532 av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score); 00533 }else 00534 av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score); 00535 } 00536 } 00537 00538 if (!*fmt) { 00539 av_free(buf); 00540 return AVERROR_INVALIDDATA; 00541 } 00542 00543 /* rewind. reuse probe buffer to avoid seeking */ 00544 if ((ret = ffio_rewind_with_probe_data(pb, buf, pd.buf_size)) < 0) 00545 av_free(buf); 00546 00547 return ret; 00548 } 00549 00550 #if FF_API_FORMAT_PARAMETERS 00551 int av_open_input_file(AVFormatContext **ic_ptr, const char *filename, 00552 AVInputFormat *fmt, 00553 int buf_size, 00554 AVFormatParameters *ap) 00555 { 00556 int err; 00557 AVDictionary *opts = convert_format_parameters(ap); 00558 00559 if (!ap || !ap->prealloced_context) 00560 *ic_ptr = NULL; 00561 00562 err = avformat_open_input(ic_ptr, filename, fmt, &opts); 00563 00564 av_dict_free(&opts); 00565 return err; 00566 } 00567 #endif 00568 00569 /* open input file and probe the format if necessary */ 00570 static int init_input(AVFormatContext *s, const char *filename) 00571 { 00572 int ret; 00573 AVProbeData pd = {filename, NULL, 0}; 00574 00575 if (s->pb) { 00576 s->flags |= AVFMT_FLAG_CUSTOM_IO; 00577 if (!s->iformat) 00578 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0); 00579 else if (s->iformat->flags & AVFMT_NOFILE) 00580 return AVERROR(EINVAL); 00581 return 0; 00582 } 00583 00584 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) || 00585 (!s->iformat && (s->iformat = av_probe_input_format(&pd, 0)))) 00586 return 0; 00587 00588 if ((ret = avio_open(&s->pb, filename, AVIO_FLAG_READ)) < 0) 00589 return ret; 00590 if (s->iformat) 00591 return 0; 00592 return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0); 00593 } 00594 00595 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options) 00596 { 00597 AVFormatContext *s = *ps; 00598 int ret = 0; 00599 AVFormatParameters ap = { 0 }; 00600 AVDictionary *tmp = NULL; 00601 00602 if (!s && !(s = avformat_alloc_context())) 00603 return AVERROR(ENOMEM); 00604 if (fmt) 00605 s->iformat = fmt; 00606 00607 if (options) 00608 av_dict_copy(&tmp, *options, 0); 00609 00610 if ((ret = av_opt_set_dict(s, &tmp)) < 0) 00611 goto fail; 00612 00613 if ((ret = init_input(s, filename)) < 0) 00614 goto fail; 00615 00616 /* check filename in case an image number is expected */ 00617 if (s->iformat->flags & AVFMT_NEEDNUMBER) { 00618 if (!av_filename_number_test(filename)) { 00619 ret = AVERROR(EINVAL); 00620 goto fail; 00621 } 00622 } 00623 00624 s->duration = s->start_time = AV_NOPTS_VALUE; 00625 av_strlcpy(s->filename, filename, 
sizeof(s->filename)); 00626 00627 /* allocate private data */ 00628 if (s->iformat->priv_data_size > 0) { 00629 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) { 00630 ret = AVERROR(ENOMEM); 00631 goto fail; 00632 } 00633 if (s->iformat->priv_class) { 00634 *(const AVClass**)s->priv_data = s->iformat->priv_class; 00635 av_opt_set_defaults(s->priv_data); 00636 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0) 00637 goto fail; 00638 } 00639 } 00640 00641 /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */ 00642 if (s->pb) 00643 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC); 00644 00645 if (s->iformat->read_header) 00646 if ((ret = s->iformat->read_header(s, &ap)) < 0) 00647 goto fail; 00648 00649 if (s->pb && !s->data_offset) 00650 s->data_offset = avio_tell(s->pb); 00651 00652 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE; 00653 00654 if (options) { 00655 av_dict_free(options); 00656 *options = tmp; 00657 } 00658 *ps = s; 00659 return 0; 00660 00661 fail: 00662 av_dict_free(&tmp); 00663 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO)) 00664 avio_close(s->pb); 00665 avformat_free_context(s); 00666 *ps = NULL; 00667 return ret; 00668 } 00669 00670 /*******************************************************/ 00671 00672 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt, 00673 AVPacketList **plast_pktl){ 00674 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList)); 00675 if (!pktl) 00676 return NULL; 00677 00678 if (*packet_buffer) 00679 (*plast_pktl)->next = pktl; 00680 else 00681 *packet_buffer = pktl; 00682 00683 /* add the packet in the buffered packet list */ 00684 *plast_pktl = pktl; 00685 pktl->pkt= *pkt; 00686 return &pktl->pkt; 00687 } 00688 00689 int av_read_packet(AVFormatContext *s, AVPacket *pkt) 00690 { 00691 int ret, i; 00692 AVStream *st; 00693 00694 for(;;){ 00695 AVPacketList *pktl = s->raw_packet_buffer; 00696 00697 if (pktl) { 00698 *pkt = pktl->pkt; 00699 if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE || 00700 !s->streams[pkt->stream_index]->probe_packets || 00701 s->raw_packet_buffer_remaining_size < pkt->size){ 00702 AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data; 00703 av_freep(&pd->buf); 00704 pd->buf_size = 0; 00705 s->raw_packet_buffer = pktl->next; 00706 s->raw_packet_buffer_remaining_size += pkt->size; 00707 av_free(pktl); 00708 return 0; 00709 } 00710 } 00711 00712 av_init_packet(pkt); 00713 ret= s->iformat->read_packet(s, pkt); 00714 if (ret < 0) { 00715 if (!pktl || ret == AVERROR(EAGAIN)) 00716 return ret; 00717 for (i = 0; i < s->nb_streams; i++) 00718 s->streams[i]->probe_packets = 0; 00719 continue; 00720 } 00721 st= s->streams[pkt->stream_index]; 00722 00723 switch(st->codec->codec_type){ 00724 case AVMEDIA_TYPE_VIDEO: 00725 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id; 00726 break; 00727 case AVMEDIA_TYPE_AUDIO: 00728 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id; 00729 break; 00730 case AVMEDIA_TYPE_SUBTITLE: 00731 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id; 00732 break; 00733 } 00734 00735 if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE || 00736 !st->probe_packets)) 00737 return ret; 00738 00739 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end); 00740 s->raw_packet_buffer_remaining_size -= pkt->size; 00741 00742 if(st->codec->codec_id == CODEC_ID_PROBE){ 00743 AVProbeData *pd = &st->probe_data; 00744 av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index); 00745 --st->probe_packets; 00746 
00747 pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE); 00748 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size); 00749 pd->buf_size += pkt->size; 00750 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE); 00751 00752 if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){ 00753 //FIXME we dont reduce score to 0 for the case of running out of buffer space in bytes 00754 set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0); 00755 if(st->codec->codec_id != CODEC_ID_PROBE){ 00756 pd->buf_size=0; 00757 av_freep(&pd->buf); 00758 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index); 00759 } 00760 } 00761 } 00762 } 00763 } 00764 00765 /**********************************************************/ 00766 00770 static int get_audio_frame_size(AVCodecContext *enc, int size) 00771 { 00772 int frame_size; 00773 00774 if(enc->codec_id == CODEC_ID_VORBIS) 00775 return -1; 00776 00777 if (enc->frame_size <= 1) { 00778 int bits_per_sample = av_get_bits_per_sample(enc->codec_id); 00779 00780 if (bits_per_sample) { 00781 if (enc->channels == 0) 00782 return -1; 00783 frame_size = (size << 3) / (bits_per_sample * enc->channels); 00784 } else { 00785 /* used for example by ADPCM codecs */ 00786 if (enc->bit_rate == 0) 00787 return -1; 00788 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate; 00789 } 00790 } else { 00791 frame_size = enc->frame_size; 00792 } 00793 return frame_size; 00794 } 00795 00796 00800 static void compute_frame_duration(int *pnum, int *pden, AVStream *st, 00801 AVCodecParserContext *pc, AVPacket *pkt) 00802 { 00803 int frame_size; 00804 00805 *pnum = 0; 00806 *pden = 0; 00807 switch(st->codec->codec_type) { 00808 case AVMEDIA_TYPE_VIDEO: 00809 if(st->time_base.num*1000LL > st->time_base.den){ 00810 *pnum = st->time_base.num; 00811 *pden = st->time_base.den; 00812 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){ 00813 *pnum = st->codec->time_base.num; 00814 *pden = st->codec->time_base.den; 00815 if (pc && pc->repeat_pict) { 00816 *pnum = (*pnum) * (1 + pc->repeat_pict); 00817 } 00818 //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet 00819 //Thus if we have no parser in such case leave duration undefined. 
00820 if(st->codec->ticks_per_frame>1 && !pc){ 00821 *pnum = *pden = 0; 00822 } 00823 } 00824 break; 00825 case AVMEDIA_TYPE_AUDIO: 00826 frame_size = get_audio_frame_size(st->codec, pkt->size); 00827 if (frame_size <= 0 || st->codec->sample_rate <= 0) 00828 break; 00829 *pnum = frame_size; 00830 *pden = st->codec->sample_rate; 00831 break; 00832 default: 00833 break; 00834 } 00835 } 00836 00837 static int is_intra_only(AVCodecContext *enc){ 00838 if(enc->codec_type == AVMEDIA_TYPE_AUDIO){ 00839 return 1; 00840 }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){ 00841 switch(enc->codec_id){ 00842 case CODEC_ID_MJPEG: 00843 case CODEC_ID_MJPEGB: 00844 case CODEC_ID_LJPEG: 00845 case CODEC_ID_RAWVIDEO: 00846 case CODEC_ID_DVVIDEO: 00847 case CODEC_ID_HUFFYUV: 00848 case CODEC_ID_FFVHUFF: 00849 case CODEC_ID_ASV1: 00850 case CODEC_ID_ASV2: 00851 case CODEC_ID_VCR1: 00852 case CODEC_ID_DNXHD: 00853 case CODEC_ID_JPEG2000: 00854 return 1; 00855 default: break; 00856 } 00857 } 00858 return 0; 00859 } 00860 00861 static void update_initial_timestamps(AVFormatContext *s, int stream_index, 00862 int64_t dts, int64_t pts) 00863 { 00864 AVStream *st= s->streams[stream_index]; 00865 AVPacketList *pktl= s->packet_buffer; 00866 00867 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE) 00868 return; 00869 00870 st->first_dts= dts - st->cur_dts; 00871 st->cur_dts= dts; 00872 00873 for(; pktl; pktl= pktl->next){ 00874 if(pktl->pkt.stream_index != stream_index) 00875 continue; 00876 //FIXME think more about this check 00877 if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts) 00878 pktl->pkt.pts += st->first_dts; 00879 00880 if(pktl->pkt.dts != AV_NOPTS_VALUE) 00881 pktl->pkt.dts += st->first_dts; 00882 00883 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE) 00884 st->start_time= pktl->pkt.pts; 00885 } 00886 if (st->start_time == AV_NOPTS_VALUE) 00887 st->start_time = pts; 00888 } 00889 00890 static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt) 00891 { 00892 AVPacketList *pktl= s->packet_buffer; 00893 int64_t cur_dts= 0; 00894 00895 if(st->first_dts != AV_NOPTS_VALUE){ 00896 cur_dts= st->first_dts; 00897 for(; pktl; pktl= pktl->next){ 00898 if(pktl->pkt.stream_index == pkt->stream_index){ 00899 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration) 00900 break; 00901 cur_dts -= pkt->duration; 00902 } 00903 } 00904 pktl= s->packet_buffer; 00905 st->first_dts = cur_dts; 00906 }else if(st->cur_dts) 00907 return; 00908 00909 for(; pktl; pktl= pktl->next){ 00910 if(pktl->pkt.stream_index != pkt->stream_index) 00911 continue; 00912 if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE 00913 && !pktl->pkt.duration){ 00914 pktl->pkt.dts= cur_dts; 00915 if(!st->codec->has_b_frames) 00916 pktl->pkt.pts= cur_dts; 00917 cur_dts += pkt->duration; 00918 pktl->pkt.duration= pkt->duration; 00919 }else 00920 break; 00921 } 00922 if(st->first_dts == AV_NOPTS_VALUE) 00923 st->cur_dts= cur_dts; 00924 } 00925 00926 static void compute_pkt_fields(AVFormatContext *s, AVStream *st, 00927 AVCodecParserContext *pc, AVPacket *pkt) 00928 { 00929 int num, den, presentation_delayed, delay, i; 00930 int64_t offset; 00931 00932 if (s->flags & AVFMT_FLAG_NOFILLIN) 00933 return; 00934 00935 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE) 00936 pkt->dts= AV_NOPTS_VALUE; 00937 00938 if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == 
AV_PICTURE_TYPE_B) 00939 //FIXME Set low_delay = 0 when has_b_frames = 1 00940 st->codec->has_b_frames = 1; 00941 00942 /* do we have a video B-frame ? */ 00943 delay= st->codec->has_b_frames; 00944 presentation_delayed = 0; 00945 00946 // ignore delay caused by frame threading so that the mpeg2-without-dts 00947 // warning will not trigger 00948 if (delay && st->codec->active_thread_type&FF_THREAD_FRAME) 00949 delay -= st->codec->thread_count-1; 00950 00951 /* XXX: need has_b_frame, but cannot get it if the codec is 00952 not initialized */ 00953 if (delay && 00954 pc && pc->pict_type != AV_PICTURE_TYPE_B) 00955 presentation_delayed = 1; 00956 00957 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63 00958 /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){ 00959 pkt->dts -= 1LL<<st->pts_wrap_bits; 00960 } 00961 00962 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg) 00963 // we take the conservative approach and discard both 00964 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly. 00965 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){ 00966 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n"); 00967 pkt->dts= pkt->pts= AV_NOPTS_VALUE; 00968 } 00969 00970 if (pkt->duration == 0) { 00971 compute_frame_duration(&num, &den, st, pc, pkt); 00972 if (den && num) { 00973 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN); 00974 00975 if(pkt->duration != 0 && s->packet_buffer) 00976 update_initial_durations(s, st, pkt); 00977 } 00978 } 00979 00980 /* correct timestamps with byte offset if demuxers only have timestamps 00981 on packet boundaries */ 00982 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){ 00983 /* this will estimate bitrate based on this frame's duration and size */ 00984 offset = av_rescale(pc->offset, pkt->duration, pkt->size); 00985 if(pkt->pts != AV_NOPTS_VALUE) 00986 pkt->pts += offset; 00987 if(pkt->dts != AV_NOPTS_VALUE) 00988 pkt->dts += offset; 00989 } 00990 00991 if (pc && pc->dts_sync_point >= 0) { 00992 // we have synchronization info from the parser 00993 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num; 00994 if (den > 0) { 00995 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den; 00996 if (pkt->dts != AV_NOPTS_VALUE) { 00997 // got DTS from the stream, update reference timestamp 00998 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den; 00999 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den; 01000 } else if (st->reference_dts != AV_NOPTS_VALUE) { 01001 // compute DTS based on reference timestamp 01002 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den; 01003 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den; 01004 } 01005 if (pc->dts_sync_point > 0) 01006 st->reference_dts = pkt->dts; // new reference 01007 } 01008 } 01009 01010 /* This may be redundant, but it should not hurt. 
*/ 01011 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts) 01012 presentation_delayed = 1; 01013 01014 // av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc); 01015 /* interpolate PTS and DTS if they are not present */ 01016 //We skip H264 currently because delay and has_b_frames are not reliably set 01017 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){ 01018 if (presentation_delayed) { 01019 /* DTS = decompression timestamp */ 01020 /* PTS = presentation timestamp */ 01021 if (pkt->dts == AV_NOPTS_VALUE) 01022 pkt->dts = st->last_IP_pts; 01023 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); 01024 if (pkt->dts == AV_NOPTS_VALUE) 01025 pkt->dts = st->cur_dts; 01026 01027 /* this is tricky: the dts must be incremented by the duration 01028 of the frame we are displaying, i.e. the last I- or P-frame */ 01029 if (st->last_IP_duration == 0) 01030 st->last_IP_duration = pkt->duration; 01031 if(pkt->dts != AV_NOPTS_VALUE) 01032 st->cur_dts = pkt->dts + st->last_IP_duration; 01033 st->last_IP_duration = pkt->duration; 01034 st->last_IP_pts= pkt->pts; 01035 /* cannot compute PTS if not present (we can compute it only 01036 by knowing the future */ 01037 } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){ 01038 if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){ 01039 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts); 01040 int64_t new_diff= FFABS(st->cur_dts - pkt->pts); 01041 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){ 01042 pkt->pts += pkt->duration; 01043 // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size); 01044 } 01045 } 01046 01047 /* presentation is not delayed : PTS and DTS are the same */ 01048 if(pkt->pts == AV_NOPTS_VALUE) 01049 pkt->pts = pkt->dts; 01050 update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts); 01051 if(pkt->pts == AV_NOPTS_VALUE) 01052 pkt->pts = st->cur_dts; 01053 pkt->dts = pkt->pts; 01054 if(pkt->pts != AV_NOPTS_VALUE) 01055 st->cur_dts = pkt->pts + pkt->duration; 01056 } 01057 } 01058 01059 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){ 01060 st->pts_buffer[0]= pkt->pts; 01061 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++) 01062 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]); 01063 if(pkt->dts == AV_NOPTS_VALUE) 01064 pkt->dts= st->pts_buffer[0]; 01065 if(st->codec->codec_id == CODEC_ID_H264){ //we skiped it above so we try here 01066 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet 01067 } 01068 if(pkt->dts > st->cur_dts) 01069 st->cur_dts = pkt->dts; 01070 } 01071 01072 // av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts); 01073 01074 /* update flags */ 01075 if(is_intra_only(st->codec)) 01076 pkt->flags |= AV_PKT_FLAG_KEY; 01077 else if (pc) { 01078 pkt->flags = 0; 01079 /* keyframe computation */ 01080 if (pc->key_frame == 1) 01081 pkt->flags |= AV_PKT_FLAG_KEY; 01082 else if (pc->key_frame == -1 && pc->pict_type == AV_PICTURE_TYPE_I) 01083 pkt->flags |= AV_PKT_FLAG_KEY; 01084 } 01085 if (pc) 01086 pkt->convergence_duration = pc->convergence_duration; 
01087 } 01088 01089 01090 static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt) 01091 { 01092 AVStream *st; 01093 int len, ret, i; 01094 01095 av_init_packet(pkt); 01096 01097 for(;;) { 01098 /* select current input stream component */ 01099 st = s->cur_st; 01100 if (st) { 01101 if (!st->need_parsing || !st->parser) { 01102 /* no parsing needed: we just output the packet as is */ 01103 /* raw data support */ 01104 *pkt = st->cur_pkt; st->cur_pkt.data= NULL; 01105 compute_pkt_fields(s, st, NULL, pkt); 01106 s->cur_st = NULL; 01107 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && 01108 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) { 01109 ff_reduce_index(s, st->index); 01110 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME); 01111 } 01112 break; 01113 } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) { 01114 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size, 01115 st->cur_ptr, st->cur_len, 01116 st->cur_pkt.pts, st->cur_pkt.dts, 01117 st->cur_pkt.pos); 01118 st->cur_pkt.pts = AV_NOPTS_VALUE; 01119 st->cur_pkt.dts = AV_NOPTS_VALUE; 01120 /* increment read pointer */ 01121 st->cur_ptr += len; 01122 st->cur_len -= len; 01123 01124 /* return packet if any */ 01125 if (pkt->size) { 01126 got_packet: 01127 pkt->duration = 0; 01128 pkt->stream_index = st->index; 01129 pkt->pts = st->parser->pts; 01130 pkt->dts = st->parser->dts; 01131 pkt->pos = st->parser->pos; 01132 if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){ 01133 s->cur_st = NULL; 01134 pkt->destruct= st->cur_pkt.destruct; 01135 st->cur_pkt.destruct= NULL; 01136 st->cur_pkt.data = NULL; 01137 assert(st->cur_len == 0); 01138 }else{ 01139 pkt->destruct = NULL; 01140 } 01141 compute_pkt_fields(s, st, st->parser, pkt); 01142 01143 if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){ 01144 ff_reduce_index(s, st->index); 01145 av_add_index_entry(st, st->parser->frame_offset, pkt->dts, 01146 0, 0, AVINDEX_KEYFRAME); 01147 } 01148 01149 break; 01150 } 01151 } else { 01152 /* free packet */ 01153 av_free_packet(&st->cur_pkt); 01154 s->cur_st = NULL; 01155 } 01156 } else { 01157 AVPacket cur_pkt; 01158 /* read next packet */ 01159 ret = av_read_packet(s, &cur_pkt); 01160 if (ret < 0) { 01161 if (ret == AVERROR(EAGAIN)) 01162 return ret; 01163 /* return the last frames, if any */ 01164 for(i = 0; i < s->nb_streams; i++) { 01165 st = s->streams[i]; 01166 if (st->parser && st->need_parsing) { 01167 av_parser_parse2(st->parser, st->codec, 01168 &pkt->data, &pkt->size, 01169 NULL, 0, 01170 AV_NOPTS_VALUE, AV_NOPTS_VALUE, 01171 AV_NOPTS_VALUE); 01172 if (pkt->size) 01173 goto got_packet; 01174 } 01175 } 01176 /* no more packets: really terminate parsing */ 01177 return ret; 01178 } 01179 st = s->streams[cur_pkt.stream_index]; 01180 st->cur_pkt= cur_pkt; 01181 01182 if(st->cur_pkt.pts != AV_NOPTS_VALUE && 01183 st->cur_pkt.dts != AV_NOPTS_VALUE && 01184 st->cur_pkt.pts < st->cur_pkt.dts){ 01185 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n", 01186 st->cur_pkt.stream_index, 01187 st->cur_pkt.pts, 01188 st->cur_pkt.dts, 01189 st->cur_pkt.size); 01190 // av_free_packet(&st->cur_pkt); 01191 // return -1; 01192 } 01193 01194 if(s->debug & FF_FDEBUG_TS) 01195 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n", 01196 st->cur_pkt.stream_index, 01197 st->cur_pkt.pts, 01198 st->cur_pkt.dts, 01199 st->cur_pkt.size, 01200 
st->cur_pkt.duration, 01201 st->cur_pkt.flags); 01202 01203 s->cur_st = st; 01204 st->cur_ptr = st->cur_pkt.data; 01205 st->cur_len = st->cur_pkt.size; 01206 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) { 01207 st->parser = av_parser_init(st->codec->codec_id); 01208 if (!st->parser) { 01209 /* no parser available: just output the raw packets */ 01210 st->need_parsing = AVSTREAM_PARSE_NONE; 01211 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){ 01212 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES; 01213 }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){ 01214 st->parser->flags |= PARSER_FLAG_ONCE; 01215 } 01216 } 01217 } 01218 } 01219 if(s->debug & FF_FDEBUG_TS) 01220 av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n", 01221 pkt->stream_index, 01222 pkt->pts, 01223 pkt->dts, 01224 pkt->size, 01225 pkt->duration, 01226 pkt->flags); 01227 01228 return 0; 01229 } 01230 01231 int av_read_frame(AVFormatContext *s, AVPacket *pkt) 01232 { 01233 AVPacketList *pktl; 01234 int eof=0; 01235 const int genpts= s->flags & AVFMT_FLAG_GENPTS; 01236 01237 for(;;){ 01238 pktl = s->packet_buffer; 01239 if (pktl) { 01240 AVPacket *next_pkt= &pktl->pkt; 01241 01242 if(genpts && next_pkt->dts != AV_NOPTS_VALUE){ 01243 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits; 01244 while(pktl && next_pkt->pts == AV_NOPTS_VALUE){ 01245 if( pktl->pkt.stream_index == next_pkt->stream_index 01246 && (0 > av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) 01247 && av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame 01248 next_pkt->pts= pktl->pkt.dts; 01249 } 01250 pktl= pktl->next; 01251 } 01252 pktl = s->packet_buffer; 01253 } 01254 01255 if( next_pkt->pts != AV_NOPTS_VALUE 01256 || next_pkt->dts == AV_NOPTS_VALUE 01257 || !genpts || eof){ 01258 /* read packet from packet buffer, if there is data */ 01259 *pkt = *next_pkt; 01260 s->packet_buffer = pktl->next; 01261 av_free(pktl); 01262 return 0; 01263 } 01264 } 01265 if(genpts){ 01266 int ret= av_read_frame_internal(s, pkt); 01267 if(ret<0){ 01268 if(pktl && ret != AVERROR(EAGAIN)){ 01269 eof=1; 01270 continue; 01271 }else 01272 return ret; 01273 } 01274 01275 if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt, 01276 &s->packet_buffer_end)) < 0) 01277 return AVERROR(ENOMEM); 01278 }else{ 01279 assert(!s->packet_buffer); 01280 return av_read_frame_internal(s, pkt); 01281 } 01282 } 01283 } 01284 01285 /* XXX: suppress the packet queue */ 01286 static void flush_packet_queue(AVFormatContext *s) 01287 { 01288 AVPacketList *pktl; 01289 01290 for(;;) { 01291 pktl = s->packet_buffer; 01292 if (!pktl) 01293 break; 01294 s->packet_buffer = pktl->next; 01295 av_free_packet(&pktl->pkt); 01296 av_free(pktl); 01297 } 01298 while(s->raw_packet_buffer){ 01299 pktl = s->raw_packet_buffer; 01300 s->raw_packet_buffer = pktl->next; 01301 av_free_packet(&pktl->pkt); 01302 av_free(pktl); 01303 } 01304 s->packet_buffer_end= 01305 s->raw_packet_buffer_end= NULL; 01306 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE; 01307 } 01308 01309 /*******************************************************/ 01310 /* seek support */ 01311 01312 int av_find_default_stream_index(AVFormatContext *s) 01313 { 01314 int first_audio_index = -1; 01315 int i; 01316 AVStream *st; 01317 01318 if (s->nb_streams <= 0) 01319 return -1; 01320 for(i = 0; i < s->nb_streams; i++) { 01321 st = s->streams[i]; 01322 if 
(st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { 01323 return i; 01324 } 01325 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) 01326 first_audio_index = i; 01327 } 01328 return first_audio_index >= 0 ? first_audio_index : 0; 01329 } 01330 01334 void ff_read_frame_flush(AVFormatContext *s) 01335 { 01336 AVStream *st; 01337 int i, j; 01338 01339 flush_packet_queue(s); 01340 01341 s->cur_st = NULL; 01342 01343 /* for each stream, reset read state */ 01344 for(i = 0; i < s->nb_streams; i++) { 01345 st = s->streams[i]; 01346 01347 if (st->parser) { 01348 av_parser_close(st->parser); 01349 st->parser = NULL; 01350 av_free_packet(&st->cur_pkt); 01351 } 01352 st->last_IP_pts = AV_NOPTS_VALUE; 01353 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */ 01354 st->reference_dts = AV_NOPTS_VALUE; 01355 /* fail safe */ 01356 st->cur_ptr = NULL; 01357 st->cur_len = 0; 01358 01359 st->probe_packets = MAX_PROBE_PACKETS; 01360 01361 for(j=0; j<MAX_REORDER_DELAY+1; j++) 01362 st->pts_buffer[j]= AV_NOPTS_VALUE; 01363 } 01364 } 01365 01366 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){ 01367 int i; 01368 01369 for(i = 0; i < s->nb_streams; i++) { 01370 AVStream *st = s->streams[i]; 01371 01372 st->cur_dts = av_rescale(timestamp, 01373 st->time_base.den * (int64_t)ref_st->time_base.num, 01374 st->time_base.num * (int64_t)ref_st->time_base.den); 01375 } 01376 } 01377 01378 void ff_reduce_index(AVFormatContext *s, int stream_index) 01379 { 01380 AVStream *st= s->streams[stream_index]; 01381 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry); 01382 01383 if((unsigned)st->nb_index_entries >= max_entries){ 01384 int i; 01385 for(i=0; 2*i<st->nb_index_entries; i++) 01386 st->index_entries[i]= st->index_entries[2*i]; 01387 st->nb_index_entries= i; 01388 } 01389 } 01390 01391 int ff_add_index_entry(AVIndexEntry **index_entries, 01392 int *nb_index_entries, 01393 unsigned int *index_entries_allocated_size, 01394 int64_t pos, int64_t timestamp, int size, int distance, int flags) 01395 { 01396 AVIndexEntry *entries, *ie; 01397 int index; 01398 01399 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry)) 01400 return -1; 01401 01402 entries = av_fast_realloc(*index_entries, 01403 index_entries_allocated_size, 01404 (*nb_index_entries + 1) * 01405 sizeof(AVIndexEntry)); 01406 if(!entries) 01407 return -1; 01408 01409 *index_entries= entries; 01410 01411 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY); 01412 01413 if(index<0){ 01414 index= (*nb_index_entries)++; 01415 ie= &entries[index]; 01416 assert(index==0 || ie[-1].timestamp < timestamp); 01417 }else{ 01418 ie= &entries[index]; 01419 if(ie->timestamp != timestamp){ 01420 if(ie->timestamp <= timestamp) 01421 return -1; 01422 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index)); 01423 (*nb_index_entries)++; 01424 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance 01425 distance= ie->min_distance; 01426 } 01427 01428 ie->pos = pos; 01429 ie->timestamp = timestamp; 01430 ie->min_distance= distance; 01431 ie->size= size; 01432 ie->flags = flags; 01433 01434 return index; 01435 } 01436 01437 int av_add_index_entry(AVStream *st, 01438 int64_t pos, int64_t timestamp, int size, int distance, int flags) 01439 { 01440 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries, 01441 &st->index_entries_allocated_size, pos, 01442 
timestamp, size, distance, flags); 01443 } 01444 01445 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries, 01446 int64_t wanted_timestamp, int flags) 01447 { 01448 int a, b, m; 01449 int64_t timestamp; 01450 01451 a = - 1; 01452 b = nb_entries; 01453 01454 //optimize appending index entries at the end 01455 if(b && entries[b-1].timestamp < wanted_timestamp) 01456 a= b-1; 01457 01458 while (b - a > 1) { 01459 m = (a + b) >> 1; 01460 timestamp = entries[m].timestamp; 01461 if(timestamp >= wanted_timestamp) 01462 b = m; 01463 if(timestamp <= wanted_timestamp) 01464 a = m; 01465 } 01466 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b; 01467 01468 if(!(flags & AVSEEK_FLAG_ANY)){ 01469 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){ 01470 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1; 01471 } 01472 } 01473 01474 if(m == nb_entries) 01475 return -1; 01476 return m; 01477 } 01478 01479 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, 01480 int flags) 01481 { 01482 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries, 01483 wanted_timestamp, flags); 01484 } 01485 01486 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){ 01487 AVInputFormat *avif= s->iformat; 01488 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit; 01489 int64_t ts_min, ts_max, ts; 01490 int index; 01491 int64_t ret; 01492 AVStream *st; 01493 01494 if (stream_index < 0) 01495 return -1; 01496 01497 av_dlog(s, "read_seek: %d %"PRId64"\n", stream_index, target_ts); 01498 01499 ts_max= 01500 ts_min= AV_NOPTS_VALUE; 01501 pos_limit= -1; //gcc falsely says it may be uninitialized 01502 01503 st= s->streams[stream_index]; 01504 if(st->index_entries){ 01505 AVIndexEntry *e; 01506 01507 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp() 01508 index= FFMAX(index, 0); 01509 e= &st->index_entries[index]; 01510 01511 if(e->timestamp <= target_ts || e->pos == e->min_distance){ 01512 pos_min= e->pos; 01513 ts_min= e->timestamp; 01514 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n", 01515 pos_min,ts_min); 01516 }else{ 01517 assert(index==0); 01518 } 01519 01520 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD); 01521 assert(index < st->nb_index_entries); 01522 if(index >= 0){ 01523 e= &st->index_entries[index]; 01524 assert(e->timestamp >= target_ts); 01525 pos_max= e->pos; 01526 ts_max= e->timestamp; 01527 pos_limit= pos_max - e->min_distance; 01528 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n", 01529 pos_max,pos_limit, ts_max); 01530 } 01531 } 01532 01533 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp); 01534 if(pos<0) 01535 return -1; 01536 01537 /* do the seek */ 01538 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0) 01539 return ret; 01540 01541 av_update_cur_dts(s, st, ts); 01542 01543 return 0; 01544 } 01545 01546 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){ 01547 int64_t pos, ts; 01548 int64_t start_pos, filesize; 01549 int no_change; 01550 01551 av_dlog(s, "gen_seek: %d %"PRId64"\n", 
stream_index, target_ts); 01552 01553 if(ts_min == AV_NOPTS_VALUE){ 01554 pos_min = s->data_offset; 01555 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX); 01556 if (ts_min == AV_NOPTS_VALUE) 01557 return -1; 01558 } 01559 01560 if(ts_max == AV_NOPTS_VALUE){ 01561 int step= 1024; 01562 filesize = avio_size(s->pb); 01563 pos_max = filesize - 1; 01564 do{ 01565 pos_max -= step; 01566 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step); 01567 step += step; 01568 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step); 01569 if (ts_max == AV_NOPTS_VALUE) 01570 return -1; 01571 01572 for(;;){ 01573 int64_t tmp_pos= pos_max + 1; 01574 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX); 01575 if(tmp_ts == AV_NOPTS_VALUE) 01576 break; 01577 ts_max= tmp_ts; 01578 pos_max= tmp_pos; 01579 if(tmp_pos >= filesize) 01580 break; 01581 } 01582 pos_limit= pos_max; 01583 } 01584 01585 if(ts_min > ts_max){ 01586 return -1; 01587 }else if(ts_min == ts_max){ 01588 pos_limit= pos_min; 01589 } 01590 01591 no_change=0; 01592 while (pos_min < pos_limit) { 01593 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n", 01594 pos_min, pos_max, ts_min, ts_max); 01595 assert(pos_limit <= pos_max); 01596 01597 if(no_change==0){ 01598 int64_t approximate_keyframe_distance= pos_max - pos_limit; 01599 // interpolate position (better than dichotomy) 01600 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min) 01601 + pos_min - approximate_keyframe_distance; 01602 }else if(no_change==1){ 01603 // bisection, if interpolation failed to change min or max pos last time 01604 pos = (pos_min + pos_limit)>>1; 01605 }else{ 01606 /* linear search if bisection failed, can only happen if there 01607 are very few or no keyframes between min/max */ 01608 pos=pos_min; 01609 } 01610 if(pos <= pos_min) 01611 pos= pos_min + 1; 01612 else if(pos > pos_limit) 01613 pos= pos_limit; 01614 start_pos= pos; 01615 01616 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1 01617 if(pos == pos_max) 01618 no_change++; 01619 else 01620 no_change=0; 01621 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n", 01622 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, 01623 pos_limit, start_pos, no_change); 01624 if(ts == AV_NOPTS_VALUE){ 01625 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n"); 01626 return -1; 01627 } 01628 assert(ts != AV_NOPTS_VALUE); 01629 if (target_ts <= ts) { 01630 pos_limit = start_pos - 1; 01631 pos_max = pos; 01632 ts_max = ts; 01633 } 01634 if (target_ts >= ts) { 01635 pos_min = pos; 01636 ts_min = ts; 01637 } 01638 } 01639 01640 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max; 01641 ts = (flags & AVSEEK_FLAG_BACKWARD) ? 
ts_min : ts_max; 01642 pos_min = pos; 01643 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX); 01644 pos_min++; 01645 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX); 01646 av_dlog(s, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n", 01647 pos, ts_min, target_ts, ts_max); 01648 *ts_ret= ts; 01649 return pos; 01650 } 01651 01652 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){ 01653 int64_t pos_min, pos_max; 01654 #if 0 01655 AVStream *st; 01656 01657 if (stream_index < 0) 01658 return -1; 01659 01660 st= s->streams[stream_index]; 01661 #endif 01662 01663 pos_min = s->data_offset; 01664 pos_max = avio_size(s->pb) - 1; 01665 01666 if (pos < pos_min) pos= pos_min; 01667 else if(pos > pos_max) pos= pos_max; 01668 01669 avio_seek(s->pb, pos, SEEK_SET); 01670 01671 #if 0 01672 av_update_cur_dts(s, st, ts); 01673 #endif 01674 return 0; 01675 } 01676 01677 static int av_seek_frame_generic(AVFormatContext *s, 01678 int stream_index, int64_t timestamp, int flags) 01679 { 01680 int index; 01681 int64_t ret; 01682 AVStream *st; 01683 AVIndexEntry *ie; 01684 01685 st = s->streams[stream_index]; 01686 01687 index = av_index_search_timestamp(st, timestamp, flags); 01688 01689 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp) 01690 return -1; 01691 01692 if(index < 0 || index==st->nb_index_entries-1){ 01693 int i; 01694 AVPacket pkt; 01695 01696 if(st->nb_index_entries){ 01697 assert(st->index_entries); 01698 ie= &st->index_entries[st->nb_index_entries-1]; 01699 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0) 01700 return ret; 01701 av_update_cur_dts(s, st, ie->timestamp); 01702 }else{ 01703 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0) 01704 return ret; 01705 } 01706 for(i=0;; i++) { 01707 int ret; 01708 do{ 01709 ret = av_read_frame(s, &pkt); 01710 }while(ret == AVERROR(EAGAIN)); 01711 if(ret<0) 01712 break; 01713 av_free_packet(&pkt); 01714 if(stream_index == pkt.stream_index){ 01715 if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp) 01716 break; 01717 } 01718 } 01719 index = av_index_search_timestamp(st, timestamp, flags); 01720 } 01721 if (index < 0) 01722 return -1; 01723 01724 ff_read_frame_flush(s); 01725 if (s->iformat->read_seek){ 01726 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0) 01727 return 0; 01728 } 01729 ie = &st->index_entries[index]; 01730 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0) 01731 return ret; 01732 av_update_cur_dts(s, st, ie->timestamp); 01733 01734 return 0; 01735 } 01736 01737 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) 01738 { 01739 int ret; 01740 AVStream *st; 01741 01742 ff_read_frame_flush(s); 01743 01744 if(flags & AVSEEK_FLAG_BYTE) 01745 return av_seek_frame_byte(s, stream_index, timestamp, flags); 01746 01747 if(stream_index < 0){ 01748 stream_index= av_find_default_stream_index(s); 01749 if(stream_index < 0) 01750 return -1; 01751 01752 st= s->streams[stream_index]; 01753 /* timestamp for default must be expressed in AV_TIME_BASE units */ 01754 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num); 01755 } 01756 01757 /* first, we try the format specific seek */ 01758 if (s->iformat->read_seek) 01759 ret = s->iformat->read_seek(s, stream_index, timestamp, flags); 01760 else 01761 ret = -1; 01762 if (ret >= 0) { 01763 return 0; 01764 } 01765 01766 if(s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) 01767 
return av_seek_frame_binary(s, stream_index, timestamp, flags); 01768 else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) 01769 return av_seek_frame_generic(s, stream_index, timestamp, flags); 01770 else 01771 return -1; 01772 } 01773 01774 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags) 01775 { 01776 if(min_ts > ts || max_ts < ts) 01777 return -1; 01778 01779 ff_read_frame_flush(s); 01780 01781 if (s->iformat->read_seek2) 01782 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags); 01783 01784 if(s->iformat->read_timestamp){ 01785 //try to seek via read_timestamp() 01786 } 01787 01788 //Fallback to old API if new is not implemented but old is 01789 //Note the old has somewat different sematics 01790 if(s->iformat->read_seek || 1) 01791 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0)); 01792 01793 // try some generic seek like av_seek_frame_generic() but with new ts semantics 01794 } 01795 01796 /*******************************************************/ 01797 01803 static int av_has_duration(AVFormatContext *ic) 01804 { 01805 int i; 01806 AVStream *st; 01807 01808 for(i = 0;i < ic->nb_streams; i++) { 01809 st = ic->streams[i]; 01810 if (st->duration != AV_NOPTS_VALUE) 01811 return 1; 01812 } 01813 return 0; 01814 } 01815 01821 static void av_update_stream_timings(AVFormatContext *ic) 01822 { 01823 int64_t start_time, start_time1, end_time, end_time1; 01824 int64_t duration, duration1; 01825 int i; 01826 AVStream *st; 01827 01828 start_time = INT64_MAX; 01829 end_time = INT64_MIN; 01830 duration = INT64_MIN; 01831 for(i = 0;i < ic->nb_streams; i++) { 01832 st = ic->streams[i]; 01833 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) { 01834 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q); 01835 if (start_time1 < start_time) 01836 start_time = start_time1; 01837 if (st->duration != AV_NOPTS_VALUE) { 01838 end_time1 = start_time1 01839 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q); 01840 if (end_time1 > end_time) 01841 end_time = end_time1; 01842 } 01843 } 01844 if (st->duration != AV_NOPTS_VALUE) { 01845 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q); 01846 if (duration1 > duration) 01847 duration = duration1; 01848 } 01849 } 01850 if (start_time != INT64_MAX) { 01851 ic->start_time = start_time; 01852 if (end_time != INT64_MIN) { 01853 if (end_time - start_time > duration) 01854 duration = end_time - start_time; 01855 } 01856 } 01857 if (duration != INT64_MIN) { 01858 ic->duration = duration; 01859 if (ic->file_size > 0) { 01860 /* compute the bitrate */ 01861 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE / 01862 (double)ic->duration; 01863 } 01864 } 01865 } 01866 01867 static void fill_all_stream_timings(AVFormatContext *ic) 01868 { 01869 int i; 01870 AVStream *st; 01871 01872 av_update_stream_timings(ic); 01873 for(i = 0;i < ic->nb_streams; i++) { 01874 st = ic->streams[i]; 01875 if (st->start_time == AV_NOPTS_VALUE) { 01876 if(ic->start_time != AV_NOPTS_VALUE) 01877 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base); 01878 if(ic->duration != AV_NOPTS_VALUE) 01879 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base); 01880 } 01881 } 01882 } 01883 01884 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic) 01885 { 01886 int64_t filesize, duration; 01887 int bit_rate, i; 01888 AVStream *st; 01889 
01890 /* if bit_rate is already set, we believe it */ 01891 if (ic->bit_rate <= 0) { 01892 bit_rate = 0; 01893 for(i=0;i<ic->nb_streams;i++) { 01894 st = ic->streams[i]; 01895 if (st->codec->bit_rate > 0) 01896 bit_rate += st->codec->bit_rate; 01897 } 01898 ic->bit_rate = bit_rate; 01899 } 01900 01901 /* if duration is already set, we believe it */ 01902 if (ic->duration == AV_NOPTS_VALUE && 01903 ic->bit_rate != 0 && 01904 ic->file_size != 0) { 01905 filesize = ic->file_size; 01906 if (filesize > 0) { 01907 for(i = 0; i < ic->nb_streams; i++) { 01908 st = ic->streams[i]; 01909 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num); 01910 if (st->duration == AV_NOPTS_VALUE) 01911 st->duration = duration; 01912 } 01913 } 01914 } 01915 } 01916 01917 #define DURATION_MAX_READ_SIZE 250000 01918 #define DURATION_MAX_RETRY 3 01919 01920 /* only usable for MPEG-PS streams */ 01921 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset) 01922 { 01923 AVPacket pkt1, *pkt = &pkt1; 01924 AVStream *st; 01925 int read_size, i, ret; 01926 int64_t end_time; 01927 int64_t filesize, offset, duration; 01928 int retry=0; 01929 01930 ic->cur_st = NULL; 01931 01932 /* flush packet queue */ 01933 flush_packet_queue(ic); 01934 01935 for (i=0; i<ic->nb_streams; i++) { 01936 st = ic->streams[i]; 01937 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE) 01938 av_log(st->codec, AV_LOG_WARNING, "start time is not set in av_estimate_timings_from_pts\n"); 01939 01940 if (st->parser) { 01941 av_parser_close(st->parser); 01942 st->parser= NULL; 01943 av_free_packet(&st->cur_pkt); 01944 } 01945 } 01946 01947 /* estimate the end time (duration) */ 01948 /* XXX: may need to support wrapping */ 01949 filesize = ic->file_size; 01950 end_time = AV_NOPTS_VALUE; 01951 do{ 01952 offset = filesize - (DURATION_MAX_READ_SIZE<<retry); 01953 if (offset < 0) 01954 offset = 0; 01955 01956 avio_seek(ic->pb, offset, SEEK_SET); 01957 read_size = 0; 01958 for(;;) { 01959 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0))) 01960 break; 01961 01962 do{ 01963 ret = av_read_packet(ic, pkt); 01964 }while(ret == AVERROR(EAGAIN)); 01965 if (ret != 0) 01966 break; 01967 read_size += pkt->size; 01968 st = ic->streams[pkt->stream_index]; 01969 if (pkt->pts != AV_NOPTS_VALUE && 01970 (st->start_time != AV_NOPTS_VALUE || 01971 st->first_dts != AV_NOPTS_VALUE)) { 01972 duration = end_time = pkt->pts; 01973 if (st->start_time != AV_NOPTS_VALUE) duration -= st->start_time; 01974 else duration -= st->first_dts; 01975 if (duration < 0) 01976 duration += 1LL<<st->pts_wrap_bits; 01977 if (duration > 0) { 01978 if (st->duration == AV_NOPTS_VALUE || 01979 st->duration < duration) 01980 st->duration = duration; 01981 } 01982 } 01983 av_free_packet(pkt); 01984 } 01985 }while( end_time==AV_NOPTS_VALUE 01986 && filesize > (DURATION_MAX_READ_SIZE<<retry) 01987 && ++retry <= DURATION_MAX_RETRY); 01988 01989 fill_all_stream_timings(ic); 01990 01991 avio_seek(ic->pb, old_offset, SEEK_SET); 01992 for (i=0; i<ic->nb_streams; i++) { 01993 st= ic->streams[i]; 01994 st->cur_dts= st->first_dts; 01995 st->last_IP_pts = AV_NOPTS_VALUE; 01996 st->reference_dts = AV_NOPTS_VALUE; 01997 } 01998 } 01999 02000 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset) 02001 { 02002 int64_t file_size; 02003 02004 /* get the file size, if possible */ 02005 if (ic->iformat->flags & AVFMT_NOFILE) { 02006 file_size = 0; 02007 } else { 02008 file_size = avio_size(ic->pb); 02009 if 
(file_size < 0) 02010 file_size = 0; 02011 } 02012 ic->file_size = file_size; 02013 02014 if ((!strcmp(ic->iformat->name, "mpeg") || 02015 !strcmp(ic->iformat->name, "mpegts")) && 02016 file_size && ic->pb->seekable) { 02017 /* get accurate estimate from the PTSes */ 02018 av_estimate_timings_from_pts(ic, old_offset); 02019 } else if (av_has_duration(ic)) { 02020 /* at least one component has timings - we use them for all 02021 the components */ 02022 fill_all_stream_timings(ic); 02023 } else { 02024 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n"); 02025 /* less precise: use bitrate info */ 02026 av_estimate_timings_from_bit_rate(ic); 02027 } 02028 av_update_stream_timings(ic); 02029 02030 { 02031 int i; 02032 AVStream av_unused *st; 02033 for(i = 0;i < ic->nb_streams; i++) { 02034 st = ic->streams[i]; 02035 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i, 02036 (double) st->start_time / AV_TIME_BASE, 02037 (double) st->duration / AV_TIME_BASE); 02038 } 02039 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n", 02040 (double) ic->start_time / AV_TIME_BASE, 02041 (double) ic->duration / AV_TIME_BASE, 02042 ic->bit_rate / 1000); 02043 } 02044 } 02045 02046 static int has_codec_parameters(AVCodecContext *enc) 02047 { 02048 int val; 02049 switch(enc->codec_type) { 02050 case AVMEDIA_TYPE_AUDIO: 02051 val = enc->sample_rate && enc->channels && enc->sample_fmt != AV_SAMPLE_FMT_NONE; 02052 if(!enc->frame_size && 02053 (enc->codec_id == CODEC_ID_VORBIS || 02054 enc->codec_id == CODEC_ID_AAC || 02055 enc->codec_id == CODEC_ID_MP1 || 02056 enc->codec_id == CODEC_ID_MP2 || 02057 enc->codec_id == CODEC_ID_MP3 || 02058 enc->codec_id == CODEC_ID_SPEEX)) 02059 return 0; 02060 break; 02061 case AVMEDIA_TYPE_VIDEO: 02062 val = enc->width && enc->pix_fmt != PIX_FMT_NONE; 02063 break; 02064 default: 02065 val = 1; 02066 break; 02067 } 02068 return enc->codec_id != CODEC_ID_NONE && val != 0; 02069 } 02070 02071 static int has_decode_delay_been_guessed(AVStream *st) 02072 { 02073 return st->codec->codec_id != CODEC_ID_H264 || 02074 st->codec_info_nb_frames >= 6 + st->codec->has_b_frames; 02075 } 02076 02077 static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **options) 02078 { 02079 int16_t *samples; 02080 AVCodec *codec; 02081 int got_picture, data_size, ret=0; 02082 AVFrame picture; 02083 02084 if(!st->codec->codec){ 02085 codec = avcodec_find_decoder(st->codec->codec_id); 02086 if (!codec) 02087 return -1; 02088 ret = avcodec_open2(st->codec, codec, options); 02089 if (ret < 0) 02090 return ret; 02091 } 02092 02093 if(!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st)){ 02094 switch(st->codec->codec_type) { 02095 case AVMEDIA_TYPE_VIDEO: 02096 avcodec_get_frame_defaults(&picture); 02097 ret = avcodec_decode_video2(st->codec, &picture, 02098 &got_picture, avpkt); 02099 break; 02100 case AVMEDIA_TYPE_AUDIO: 02101 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE); 02102 samples = av_malloc(data_size); 02103 if (!samples) 02104 goto fail; 02105 ret = avcodec_decode_audio3(st->codec, samples, 02106 &data_size, avpkt); 02107 av_free(samples); 02108 break; 02109 default: 02110 break; 02111 } 02112 } 02113 fail: 02114 return ret; 02115 } 02116 02117 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id) 02118 { 02119 while (tags->id != CODEC_ID_NONE) { 02120 if (tags->id == id) 02121 return tags->tag; 02122 tags++; 02123 } 02124 return 0; 02125 } 02126 02127 enum CodecID 
ff_codec_get_id(const AVCodecTag *tags, unsigned int tag) 02128 { 02129 int i; 02130 for(i=0; tags[i].id != CODEC_ID_NONE;i++) { 02131 if(tag == tags[i].tag) 02132 return tags[i].id; 02133 } 02134 for(i=0; tags[i].id != CODEC_ID_NONE; i++) { 02135 if (ff_toupper4(tag) == ff_toupper4(tags[i].tag)) 02136 return tags[i].id; 02137 } 02138 return CODEC_ID_NONE; 02139 } 02140 02141 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id) 02142 { 02143 int i; 02144 for(i=0; tags && tags[i]; i++){ 02145 int tag= ff_codec_get_tag(tags[i], id); 02146 if(tag) return tag; 02147 } 02148 return 0; 02149 } 02150 02151 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag) 02152 { 02153 int i; 02154 for(i=0; tags && tags[i]; i++){ 02155 enum CodecID id= ff_codec_get_id(tags[i], tag); 02156 if(id!=CODEC_ID_NONE) return id; 02157 } 02158 return CODEC_ID_NONE; 02159 } 02160 02161 static void compute_chapters_end(AVFormatContext *s) 02162 { 02163 unsigned int i, j; 02164 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time); 02165 02166 for (i = 0; i < s->nb_chapters; i++) 02167 if (s->chapters[i]->end == AV_NOPTS_VALUE) { 02168 AVChapter *ch = s->chapters[i]; 02169 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base) 02170 : INT64_MAX; 02171 02172 for (j = 0; j < s->nb_chapters; j++) { 02173 AVChapter *ch1 = s->chapters[j]; 02174 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base); 02175 if (j != i && next_start > ch->start && next_start < end) 02176 end = next_start; 02177 } 02178 ch->end = (end == INT64_MAX) ? ch->start : end; 02179 } 02180 } 02181 02182 static int get_std_framerate(int i){ 02183 if(i<60*12) return i*1001; 02184 else return ((const int[]){24,30,60,12,15})[i-60*12]*1000*12; 02185 } 02186 02187 /* 02188 * Is the time base unreliable. 02189 * This is a heuristic to balance between quick acceptance of the values in 02190 * the headers vs. some extra checks. 02191 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps. 02192 * MPEG-2 commonly misuses field repeat flags to store different framerates. 02193 * And there are "variable" fps files this needs to detect as well. 
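 * Concretely, the checks below flag a coded time base whose tick rate is
 * above roughly 100 Hz or below 5 Hz, and always treat MPEG-2 and H.264 as
 * unreliable, since their codec-level time base frequently does not match
 * the real frame rate.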
02194 */ 02195 static int tb_unreliable(AVCodecContext *c){ 02196 if( c->time_base.den >= 101L*c->time_base.num 02197 || c->time_base.den < 5L*c->time_base.num 02198 /* || c->codec_tag == AV_RL32("DIVX") 02199 || c->codec_tag == AV_RL32("XVID")*/ 02200 || c->codec_id == CODEC_ID_MPEG2VIDEO 02201 || c->codec_id == CODEC_ID_H264 02202 ) 02203 return 1; 02204 return 0; 02205 } 02206 02207 #if FF_API_FORMAT_PARAMETERS 02208 int av_find_stream_info(AVFormatContext *ic) 02209 { 02210 return avformat_find_stream_info(ic, NULL); 02211 } 02212 #endif 02213 02214 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) 02215 { 02216 int i, count, ret, read_size, j; 02217 AVStream *st; 02218 AVPacket pkt1, *pkt; 02219 int64_t old_offset = avio_tell(ic->pb); 02220 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those 02221 02222 for(i=0;i<ic->nb_streams;i++) { 02223 AVCodec *codec; 02224 st = ic->streams[i]; 02225 if (st->codec->codec_id == CODEC_ID_AAC) { 02226 st->codec->sample_rate = 0; 02227 st->codec->frame_size = 0; 02228 st->codec->channels = 0; 02229 } 02230 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO || 02231 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) { 02232 /* if(!st->time_base.num) 02233 st->time_base= */ 02234 if(!st->codec->time_base.num) 02235 st->codec->time_base= st->time_base; 02236 } 02237 //only for the split stuff 02238 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) { 02239 st->parser = av_parser_init(st->codec->codec_id); 02240 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){ 02241 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES; 02242 } 02243 } 02244 assert(!st->codec->codec); 02245 codec = avcodec_find_decoder(st->codec->codec_id); 02246 02247 /* Force decoding of at least one frame of codec data 02248 * this makes sure the codec initializes the channel configuration 02249 * and does not trust the values from the container. 02250 */ 02251 if (codec && codec->capabilities & CODEC_CAP_CHANNEL_CONF) 02252 st->codec->channels = 0; 02253 02254 /* Ensure that subtitle_header is properly set. */ 02255 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE 02256 && codec && !st->codec->codec) 02257 avcodec_open2(st->codec, codec, options ? &options[i] : NULL); 02258 02259 //try to just open decoders, in case this is enough to get parameters 02260 if(!has_codec_parameters(st->codec)){ 02261 if (codec && !st->codec->codec) 02262 avcodec_open2(st->codec, codec, options ? 
&options[i] : NULL); 02263 } 02264 } 02265 02266 for (i=0; i<ic->nb_streams; i++) { 02267 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE; 02268 } 02269 02270 count = 0; 02271 read_size = 0; 02272 for(;;) { 02273 if(url_interrupt_cb()){ 02274 ret= AVERROR_EXIT; 02275 av_log(ic, AV_LOG_DEBUG, "interrupted\n"); 02276 break; 02277 } 02278 02279 /* check if one codec still needs to be handled */ 02280 for(i=0;i<ic->nb_streams;i++) { 02281 int fps_analyze_framecount = 20; 02282 02283 st = ic->streams[i]; 02284 if (!has_codec_parameters(st->codec)) 02285 break; 02286 /* if the timebase is coarse (like the usual millisecond precision 02287 of mkv), we need to analyze more frames to reliably arrive at 02288 the correct fps */ 02289 if (av_q2d(st->time_base) > 0.0005) 02290 fps_analyze_framecount *= 2; 02291 if (ic->fps_probe_size >= 0) 02292 fps_analyze_framecount = ic->fps_probe_size; 02293 /* variable fps and no guess at the real fps */ 02294 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num) 02295 && st->info->duration_count < fps_analyze_framecount 02296 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO) 02297 break; 02298 if(st->parser && st->parser->parser->split && !st->codec->extradata) 02299 break; 02300 if(st->first_dts == AV_NOPTS_VALUE) 02301 break; 02302 } 02303 if (i == ic->nb_streams) { 02304 /* NOTE: if the format has no header, then we need to read 02305 some packets to get most of the streams, so we cannot 02306 stop here */ 02307 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) { 02308 /* if we found the info for all the codecs, we can stop */ 02309 ret = count; 02310 av_log(ic, AV_LOG_DEBUG, "All info found\n"); 02311 break; 02312 } 02313 } 02314 /* we did not get all the codec info, but we read too much data */ 02315 if (read_size >= ic->probesize) { 02316 ret = count; 02317 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize); 02318 break; 02319 } 02320 02321 /* NOTE: a new stream can be added there if no header in file 02322 (AVFMTCTX_NOHEADER) */ 02323 ret = av_read_frame_internal(ic, &pkt1); 02324 if (ret < 0 && ret != AVERROR(EAGAIN)) { 02325 /* EOF or error */ 02326 ret = -1; /* we could not have all the codec parameters before EOF */ 02327 for(i=0;i<ic->nb_streams;i++) { 02328 st = ic->streams[i]; 02329 if (!has_codec_parameters(st->codec)){ 02330 char buf[256]; 02331 avcodec_string(buf, sizeof(buf), st->codec, 0); 02332 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf); 02333 } else { 02334 ret = 0; 02335 } 02336 } 02337 break; 02338 } 02339 02340 if (ret == AVERROR(EAGAIN)) 02341 continue; 02342 02343 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end); 02344 if ((ret = av_dup_packet(pkt)) < 0) 02345 goto find_stream_info_err; 02346 02347 read_size += pkt->size; 02348 02349 st = ic->streams[pkt->stream_index]; 02350 if (st->codec_info_nb_frames>1) { 02351 if (st->time_base.den > 0 && av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) { 02352 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n"); 02353 break; 02354 } 02355 st->info->codec_info_duration += pkt->duration; 02356 } 02357 { 02358 int64_t last = st->info->last_dts; 02359 02360 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last){ 02361 int64_t duration= pkt->dts - last; 02362 double dur= duration * av_q2d(st->time_base); 02363 02364 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO) 02365 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur); 02366 
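    /* Frame rate estimation: get_std_framerate() enumerates candidate rates
     * in units of 1/(1001*12) Hz -- indices below 60*12 correspond to n/12 fps,
     * the remaining entries to 24000/1001, 30000/1001, 60000/1001, 12000/1001
     * and 15000/1001 fps. For each candidate, the observed inter-packet
     * duration is rounded to a whole number of ticks and the squared rounding
     * error is accumulated in duration_error[]; the best match is picked
     * further below when r_frame_rate is derived. */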
if (st->info->duration_count < 2) 02367 memset(st->info->duration_error, 0, sizeof(st->info->duration_error)); 02368 for (i=1; i<FF_ARRAY_ELEMS(st->info->duration_error); i++) { 02369 int framerate= get_std_framerate(i); 02370 int ticks= lrintf(dur*framerate/(1001*12)); 02371 double error = dur - (double)ticks*1001*12 / framerate; 02372 st->info->duration_error[i] += error*error; 02373 } 02374 st->info->duration_count++; 02375 // ignore the first 4 values, they might have some random jitter 02376 if (st->info->duration_count > 3) 02377 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration); 02378 } 02379 if (last == AV_NOPTS_VALUE || st->info->duration_count <= 1) 02380 st->info->last_dts = pkt->dts; 02381 } 02382 if(st->parser && st->parser->parser->split && !st->codec->extradata){ 02383 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size); 02384 if(i){ 02385 st->codec->extradata_size= i; 02386 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE); 02387 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size); 02388 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE); 02389 } 02390 } 02391 02392 /* if still no information, we try to open the codec and to 02393 decompress the frame. We try to avoid that in most cases as 02394 it takes longer and uses more memory. For MPEG-4, we need to 02395 decompress for QuickTime. */ 02396 if (!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st)) 02397 try_decode_frame(st, pkt, (options && i < orig_nb_streams )? &options[i] : NULL); 02398 02399 st->codec_info_nb_frames++; 02400 count++; 02401 } 02402 02403 // close codecs which were opened in try_decode_frame() 02404 for(i=0;i<ic->nb_streams;i++) { 02405 st = ic->streams[i]; 02406 if(st->codec->codec) 02407 avcodec_close(st->codec); 02408 } 02409 for(i=0;i<ic->nb_streams;i++) { 02410 st = ic->streams[i]; 02411 if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration) 02412 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den, 02413 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den, 02414 st->info->codec_info_duration*(int64_t)st->time_base.num, 60000); 02415 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { 02416 // the check for tb_unreliable() is not completely correct, since this is not about handling 02417 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g. 02418 // ipmovie.c produces. 02419 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > 1 && !st->r_frame_rate.num) 02420 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX); 02421 if (st->info->duration_count && !st->r_frame_rate.num 02422 && tb_unreliable(st->codec) /*&& 02423 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ... 
02424 st->time_base.num*duration_sum[i]/st->info->duration_count*101LL > st->time_base.den*/){ 02425 int num = 0; 02426 double best_error= 2*av_q2d(st->time_base); 02427 best_error = best_error*best_error*st->info->duration_count*1000*12*30; 02428 02429 for (j=1; j<FF_ARRAY_ELEMS(st->info->duration_error); j++) { 02430 double error = st->info->duration_error[j] * get_std_framerate(j); 02431 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO) 02432 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error); 02433 if(error < best_error){ 02434 best_error= error; 02435 num = get_std_framerate(j); 02436 } 02437 } 02438 // do not increase frame rate by more than 1 % in order to match a standard rate. 02439 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate))) 02440 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX); 02441 } 02442 02443 if (!st->r_frame_rate.num){ 02444 if( st->codec->time_base.den * (int64_t)st->time_base.num 02445 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){ 02446 st->r_frame_rate.num = st->codec->time_base.den; 02447 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame; 02448 }else{ 02449 st->r_frame_rate.num = st->time_base.den; 02450 st->r_frame_rate.den = st->time_base.num; 02451 } 02452 } 02453 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) { 02454 if(!st->codec->bits_per_coded_sample) 02455 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id); 02456 // set stream disposition based on audio service type 02457 switch (st->codec->audio_service_type) { 02458 case AV_AUDIO_SERVICE_TYPE_EFFECTS: 02459 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break; 02460 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED: 02461 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break; 02462 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED: 02463 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break; 02464 case AV_AUDIO_SERVICE_TYPE_COMMENTARY: 02465 st->disposition = AV_DISPOSITION_COMMENT; break; 02466 case AV_AUDIO_SERVICE_TYPE_KARAOKE: 02467 st->disposition = AV_DISPOSITION_KARAOKE; break; 02468 } 02469 } 02470 } 02471 02472 av_estimate_timings(ic, old_offset); 02473 02474 compute_chapters_end(ic); 02475 02476 #if 0 02477 /* correct DTS for B-frame streams with no timestamps */ 02478 for(i=0;i<ic->nb_streams;i++) { 02479 st = ic->streams[i]; 02480 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { 02481 if(b-frames){ 02482 ppktl = &ic->packet_buffer; 02483 while(ppkt1){ 02484 if(ppkt1->stream_index != i) 02485 continue; 02486 if(ppkt1->pkt->dts < 0) 02487 break; 02488 if(ppkt1->pkt->pts != AV_NOPTS_VALUE) 02489 break; 02490 ppkt1->pkt->dts -= delta; 02491 ppkt1= ppkt1->next; 02492 } 02493 if(ppkt1) 02494 continue; 02495 st->cur_dts -= delta; 02496 } 02497 } 02498 } 02499 #endif 02500 02501 find_stream_info_err: 02502 for (i=0; i < ic->nb_streams; i++) 02503 av_freep(&ic->streams[i]->info); 02504 return ret; 02505 } 02506 02507 static AVProgram *find_program_from_stream(AVFormatContext *ic, int s) 02508 { 02509 int i, j; 02510 02511 for (i = 0; i < ic->nb_programs; i++) 02512 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++) 02513 if (ic->programs[i]->stream_index[j] == s) 02514 return ic->programs[i]; 02515 return NULL; 02516 } 02517 02518 int av_find_best_stream(AVFormatContext *ic, 02519 enum AVMediaType type, 02520 int wanted_stream_nb, 02521 int related_stream, 02522 AVCodec **decoder_ret, 02523 
int flags) 02524 { 02525 int i, nb_streams = ic->nb_streams; 02526 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1; 02527 unsigned *program = NULL; 02528 AVCodec *decoder = NULL, *best_decoder = NULL; 02529 02530 if (related_stream >= 0 && wanted_stream_nb < 0) { 02531 AVProgram *p = find_program_from_stream(ic, related_stream); 02532 if (p) { 02533 program = p->stream_index; 02534 nb_streams = p->nb_stream_indexes; 02535 } 02536 } 02537 for (i = 0; i < nb_streams; i++) { 02538 int real_stream_index = program ? program[i] : i; 02539 AVStream *st = ic->streams[real_stream_index]; 02540 AVCodecContext *avctx = st->codec; 02541 if (avctx->codec_type != type) 02542 continue; 02543 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb) 02544 continue; 02545 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED)) 02546 continue; 02547 if (decoder_ret) { 02548 decoder = avcodec_find_decoder(st->codec->codec_id); 02549 if (!decoder) { 02550 if (ret < 0) 02551 ret = AVERROR_DECODER_NOT_FOUND; 02552 continue; 02553 } 02554 } 02555 if (best_count >= st->codec_info_nb_frames) 02556 continue; 02557 best_count = st->codec_info_nb_frames; 02558 ret = real_stream_index; 02559 best_decoder = decoder; 02560 if (program && i == nb_streams - 1 && ret < 0) { 02561 program = NULL; 02562 nb_streams = ic->nb_streams; 02563 i = 0; /* no related stream found, try again with everything */ 02564 } 02565 } 02566 if (decoder_ret) 02567 *decoder_ret = best_decoder; 02568 return ret; 02569 } 02570 02571 /*******************************************************/ 02572 02573 int av_read_play(AVFormatContext *s) 02574 { 02575 if (s->iformat->read_play) 02576 return s->iformat->read_play(s); 02577 if (s->pb) 02578 return avio_pause(s->pb, 0); 02579 return AVERROR(ENOSYS); 02580 } 02581 02582 int av_read_pause(AVFormatContext *s) 02583 { 02584 if (s->iformat->read_pause) 02585 return s->iformat->read_pause(s); 02586 if (s->pb) 02587 return avio_pause(s->pb, 1); 02588 return AVERROR(ENOSYS); 02589 } 02590 02591 void av_close_input_stream(AVFormatContext *s) 02592 { 02593 flush_packet_queue(s); 02594 if (s->iformat->read_close) 02595 s->iformat->read_close(s); 02596 avformat_free_context(s); 02597 } 02598 02599 void avformat_free_context(AVFormatContext *s) 02600 { 02601 int i; 02602 AVStream *st; 02603 02604 av_opt_free(s); 02605 if (s->iformat && s->iformat->priv_class && s->priv_data) 02606 av_opt_free(s->priv_data); 02607 02608 for(i=0;i<s->nb_streams;i++) { 02609 /* free all data in a stream component */ 02610 st = s->streams[i]; 02611 if (st->parser) { 02612 av_parser_close(st->parser); 02613 av_free_packet(&st->cur_pkt); 02614 } 02615 av_dict_free(&st->metadata); 02616 av_free(st->index_entries); 02617 av_free(st->codec->extradata); 02618 av_free(st->codec->subtitle_header); 02619 av_free(st->codec); 02620 av_free(st->priv_data); 02621 av_free(st->info); 02622 av_free(st); 02623 } 02624 for(i=s->nb_programs-1; i>=0; i--) { 02625 av_dict_free(&s->programs[i]->metadata); 02626 av_freep(&s->programs[i]->stream_index); 02627 av_freep(&s->programs[i]); 02628 } 02629 av_freep(&s->programs); 02630 av_freep(&s->priv_data); 02631 while(s->nb_chapters--) { 02632 av_dict_free(&s->chapters[s->nb_chapters]->metadata); 02633 av_free(s->chapters[s->nb_chapters]); 02634 } 02635 av_freep(&s->chapters); 02636 av_dict_free(&s->metadata); 02637 av_freep(&s->streams); 02638 av_free(s); 02639 } 02640 02641 void av_close_input_file(AVFormatContext *s) 02642 { 02643 AVIOContext *pb = 
(s->iformat->flags & AVFMT_NOFILE) || (s->flags & AVFMT_FLAG_CUSTOM_IO) ? 02644 NULL : s->pb; 02645 av_close_input_stream(s); 02646 if (pb) 02647 avio_close(pb); 02648 } 02649 02650 AVStream *av_new_stream(AVFormatContext *s, int id) 02651 { 02652 AVStream *st; 02653 int i; 02654 AVStream **streams; 02655 02656 if (s->nb_streams >= INT_MAX/sizeof(*streams)) 02657 return NULL; 02658 streams = av_realloc(s->streams, (s->nb_streams + 1) * sizeof(*streams)); 02659 if (!streams) 02660 return NULL; 02661 s->streams = streams; 02662 02663 st = av_mallocz(sizeof(AVStream)); 02664 if (!st) 02665 return NULL; 02666 if (!(st->info = av_mallocz(sizeof(*st->info)))) { 02667 av_free(st); 02668 return NULL; 02669 } 02670 02671 st->codec= avcodec_alloc_context(); 02672 if (s->iformat) { 02673 /* no default bitrate if decoding */ 02674 st->codec->bit_rate = 0; 02675 } 02676 st->index = s->nb_streams; 02677 st->id = id; 02678 st->start_time = AV_NOPTS_VALUE; 02679 st->duration = AV_NOPTS_VALUE; 02680 /* we set the current DTS to 0 so that formats without any timestamps 02681 but durations get some timestamps, formats with some unknown 02682 timestamps have their first few packets buffered and the 02683 timestamps corrected before they are returned to the user */ 02684 st->cur_dts = 0; 02685 st->first_dts = AV_NOPTS_VALUE; 02686 st->probe_packets = MAX_PROBE_PACKETS; 02687 02688 /* default pts setting is MPEG-like */ 02689 av_set_pts_info(st, 33, 1, 90000); 02690 st->last_IP_pts = AV_NOPTS_VALUE; 02691 for(i=0; i<MAX_REORDER_DELAY+1; i++) 02692 st->pts_buffer[i]= AV_NOPTS_VALUE; 02693 st->reference_dts = AV_NOPTS_VALUE; 02694 02695 st->sample_aspect_ratio = (AVRational){0,1}; 02696 02697 s->streams[s->nb_streams++] = st; 02698 return st; 02699 } 02700 02701 AVProgram *av_new_program(AVFormatContext *ac, int id) 02702 { 02703 AVProgram *program=NULL; 02704 int i; 02705 02706 av_dlog(ac, "new_program: id=0x%04x\n", id); 02707 02708 for(i=0; i<ac->nb_programs; i++) 02709 if(ac->programs[i]->id == id) 02710 program = ac->programs[i]; 02711 02712 if(!program){ 02713 program = av_mallocz(sizeof(AVProgram)); 02714 if (!program) 02715 return NULL; 02716 dynarray_add(&ac->programs, &ac->nb_programs, program); 02717 program->discard = AVDISCARD_NONE; 02718 } 02719 program->id = id; 02720 02721 return program; 02722 } 02723 02724 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title) 02725 { 02726 AVChapter *chapter = NULL; 02727 int i; 02728 02729 for(i=0; i<s->nb_chapters; i++) 02730 if(s->chapters[i]->id == id) 02731 chapter = s->chapters[i]; 02732 02733 if(!chapter){ 02734 chapter= av_mallocz(sizeof(AVChapter)); 02735 if(!chapter) 02736 return NULL; 02737 dynarray_add(&s->chapters, &s->nb_chapters, chapter); 02738 } 02739 av_dict_set(&chapter->metadata, "title", title, 0); 02740 chapter->id = id; 02741 chapter->time_base= time_base; 02742 chapter->start = start; 02743 chapter->end = end; 02744 02745 return chapter; 02746 } 02747 02748 /************************************************************/ 02749 /* output media file */ 02750 02751 #if FF_API_FORMAT_PARAMETERS 02752 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap) 02753 { 02754 int ret; 02755 02756 if (s->oformat->priv_data_size > 0) { 02757 s->priv_data = av_mallocz(s->oformat->priv_data_size); 02758 if (!s->priv_data) 02759 return AVERROR(ENOMEM); 02760 if (s->oformat->priv_class) { 02761 *(const AVClass**)s->priv_data= s->oformat->priv_class; 02762 
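    /* The muxer's private context must begin with a pointer to its AVClass
     * (set just above) so that the AVOptions call below can locate the
     * option table and its defaults. */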
av_opt_set_defaults(s->priv_data); 02763 } 02764 } else 02765 s->priv_data = NULL; 02766 02767 if (s->oformat->set_parameters) { 02768 ret = s->oformat->set_parameters(s, ap); 02769 if (ret < 0) 02770 return ret; 02771 } 02772 return 0; 02773 } 02774 #endif 02775 02776 static int validate_codec_tag(AVFormatContext *s, AVStream *st) 02777 { 02778 const AVCodecTag *avctag; 02779 int n; 02780 enum CodecID id = CODEC_ID_NONE; 02781 unsigned int tag = 0; 02782 02789 for (n = 0; s->oformat->codec_tag[n]; n++) { 02790 avctag = s->oformat->codec_tag[n]; 02791 while (avctag->id != CODEC_ID_NONE) { 02792 if (ff_toupper4(avctag->tag) == ff_toupper4(st->codec->codec_tag)) { 02793 id = avctag->id; 02794 if (id == st->codec->codec_id) 02795 return 1; 02796 } 02797 if (avctag->id == st->codec->codec_id) 02798 tag = avctag->tag; 02799 avctag++; 02800 } 02801 } 02802 if (id != CODEC_ID_NONE) 02803 return 0; 02804 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL)) 02805 return 0; 02806 return 1; 02807 } 02808 02809 #if FF_API_FORMAT_PARAMETERS 02810 int av_write_header(AVFormatContext *s) 02811 { 02812 return avformat_write_header(s, NULL); 02813 } 02814 #endif 02815 02816 int avformat_write_header(AVFormatContext *s, AVDictionary **options) 02817 { 02818 int ret = 0, i; 02819 AVStream *st; 02820 AVDictionary *tmp = NULL; 02821 02822 if (options) 02823 av_dict_copy(&tmp, *options, 0); 02824 if ((ret = av_opt_set_dict(s, &tmp)) < 0) 02825 goto fail; 02826 02827 // some sanity checks 02828 if (s->nb_streams == 0 && !(s->oformat->flags & AVFMT_NOSTREAMS)) { 02829 av_log(s, AV_LOG_ERROR, "no streams\n"); 02830 ret = AVERROR(EINVAL); 02831 goto fail; 02832 } 02833 02834 for(i=0;i<s->nb_streams;i++) { 02835 st = s->streams[i]; 02836 02837 switch (st->codec->codec_type) { 02838 case AVMEDIA_TYPE_AUDIO: 02839 if(st->codec->sample_rate<=0){ 02840 av_log(s, AV_LOG_ERROR, "sample rate not set\n"); 02841 ret = AVERROR(EINVAL); 02842 goto fail; 02843 } 02844 if(!st->codec->block_align) 02845 st->codec->block_align = st->codec->channels * 02846 av_get_bits_per_sample(st->codec->codec_id) >> 3; 02847 break; 02848 case AVMEDIA_TYPE_VIDEO: 02849 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too? 
02850 av_log(s, AV_LOG_ERROR, "time base not set\n"); 02851 ret = AVERROR(EINVAL); 02852 goto fail; 02853 } 02854 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){ 02855 av_log(s, AV_LOG_ERROR, "dimensions not set\n"); 02856 ret = AVERROR(EINVAL); 02857 goto fail; 02858 } 02859 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){ 02860 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n"); 02861 ret = AVERROR(EINVAL); 02862 goto fail; 02863 } 02864 break; 02865 } 02866 02867 if(s->oformat->codec_tag){ 02868 if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){ 02869 //the current rawvideo encoding system ends up setting the wrong codec_tag for avi, we override it here 02870 st->codec->codec_tag= 0; 02871 } 02872 if(st->codec->codec_tag){ 02873 if (!validate_codec_tag(s, st)) { 02874 char tagbuf[32]; 02875 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag); 02876 av_log(s, AV_LOG_ERROR, 02877 "Tag %s/0x%08x incompatible with output codec id '%d'\n", 02878 tagbuf, st->codec->codec_tag, st->codec->codec_id); 02879 ret = AVERROR_INVALIDDATA; 02880 goto fail; 02881 } 02882 }else 02883 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id); 02884 } 02885 02886 if(s->oformat->flags & AVFMT_GLOBALHEADER && 02887 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER)) 02888 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i); 02889 } 02890 02891 if (!s->priv_data && s->oformat->priv_data_size > 0) { 02892 s->priv_data = av_mallocz(s->oformat->priv_data_size); 02893 if (!s->priv_data) { 02894 ret = AVERROR(ENOMEM); 02895 goto fail; 02896 } 02897 if (s->oformat->priv_class) { 02898 *(const AVClass**)s->priv_data= s->oformat->priv_class; 02899 av_opt_set_defaults(s->priv_data); 02900 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0) 02901 goto fail; 02902 } 02903 } 02904 02905 /* set muxer identification string */ 02906 if (s->nb_streams && !(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) { 02907 av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0); 02908 } 02909 02910 if(s->oformat->write_header){ 02911 ret = s->oformat->write_header(s); 02912 if (ret < 0) 02913 goto fail; 02914 } 02915 02916 /* init PTS generation */ 02917 for(i=0;i<s->nb_streams;i++) { 02918 int64_t den = AV_NOPTS_VALUE; 02919 st = s->streams[i]; 02920 02921 switch (st->codec->codec_type) { 02922 case AVMEDIA_TYPE_AUDIO: 02923 den = (int64_t)st->time_base.num * st->codec->sample_rate; 02924 break; 02925 case AVMEDIA_TYPE_VIDEO: 02926 den = (int64_t)st->time_base.num * st->codec->time_base.den; 02927 break; 02928 default: 02929 break; 02930 } 02931 if (den != AV_NOPTS_VALUE) { 02932 if (den <= 0) { 02933 ret = AVERROR_INVALIDDATA; 02934 goto fail; 02935 } 02936 av_frac_init(&st->pts, 0, 0, den); 02937 } 02938 } 02939 02940 if (options) { 02941 av_dict_free(options); 02942 *options = tmp; 02943 } 02944 return 0; 02945 fail: 02946 av_dict_free(&tmp); 02947 return ret; 02948 } 02949 02950 //FIXME merge with compute_pkt_fields 02951 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){ 02952 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames); 02953 int num, den, frame_size, i; 02954 02955 av_dlog(s, "compute_pkt_fields2: pts:%"PRId64" dts:%"PRId64" 
cur_dts:%"PRId64" b:%d size:%d st:%d\n", 02956 pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index); 02957 02958 /* if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE) 02959 return AVERROR(EINVAL);*/ 02960 02961 /* duration field */ 02962 if (pkt->duration == 0) { 02963 compute_frame_duration(&num, &den, st, NULL, pkt); 02964 if (den && num) { 02965 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num); 02966 } 02967 } 02968 02969 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0) 02970 pkt->pts= pkt->dts; 02971 02972 //XXX/FIXME this is a temporary hack until all encoders output pts 02973 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){ 02974 pkt->dts= 02975 // pkt->pts= st->cur_dts; 02976 pkt->pts= st->pts.val; 02977 } 02978 02979 //calculate dts from pts 02980 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){ 02981 st->pts_buffer[0]= pkt->pts; 02982 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++) 02983 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration; 02984 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++) 02985 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]); 02986 02987 pkt->dts= st->pts_buffer[0]; 02988 } 02989 02990 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){ 02991 av_log(s, AV_LOG_ERROR, 02992 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %"PRId64" >= %"PRId64"\n", 02993 st->index, st->cur_dts, pkt->dts); 02994 return AVERROR(EINVAL); 02995 } 02996 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){ 02997 av_log(s, AV_LOG_ERROR, "pts < dts in stream %d\n", st->index); 02998 return AVERROR(EINVAL); 02999 } 03000 03001 // av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts); 03002 st->cur_dts= pkt->dts; 03003 st->pts.val= pkt->dts; 03004 03005 /* update pts */ 03006 switch (st->codec->codec_type) { 03007 case AVMEDIA_TYPE_AUDIO: 03008 frame_size = get_audio_frame_size(st->codec, pkt->size); 03009 03010 /* HACK/FIXME, we skip the initial 0 size packets as they are most 03011 likely equal to the encoder delay, but it would be better if we 03012 had the real timestamps from the encoder */ 03013 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) { 03014 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size); 03015 } 03016 break; 03017 case AVMEDIA_TYPE_VIDEO: 03018 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num); 03019 break; 03020 default: 03021 break; 03022 } 03023 return 0; 03024 } 03025 03026 int av_write_frame(AVFormatContext *s, AVPacket *pkt) 03027 { 03028 int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt); 03029 03030 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS)) 03031 return ret; 03032 03033 ret= s->oformat->write_packet(s, pkt); 03034 return ret; 03035 } 03036 03037 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt, 03038 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *)) 03039 { 03040 AVPacketList **next_point, *this_pktl; 03041 03042 this_pktl = av_mallocz(sizeof(AVPacketList)); 03043 this_pktl->pkt= *pkt; 03044 pkt->destruct= NULL; // do not free original but only the copy 03045 av_dup_packet(&this_pktl->pkt); // duplicate the packet if it uses non-alloced memory 
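    /* Choose where to start the sorted insertion: packets of a single stream
     * already arrive in interleaving order, so the search can begin at the
     * last buffered packet of this stream instead of the head of the buffer;
     * if the new packet also sorts after the current tail, it is simply
     * appended. */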
03046 03047 if(s->streams[pkt->stream_index]->last_in_packet_buffer){ 03048 next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next); 03049 }else 03050 next_point = &s->packet_buffer; 03051 03052 if(*next_point){ 03053 if(compare(s, &s->packet_buffer_end->pkt, pkt)){ 03054 while(!compare(s, &(*next_point)->pkt, pkt)){ 03055 next_point= &(*next_point)->next; 03056 } 03057 goto next_non_null; 03058 }else{ 03059 next_point = &(s->packet_buffer_end->next); 03060 } 03061 } 03062 assert(!*next_point); 03063 03064 s->packet_buffer_end= this_pktl; 03065 next_non_null: 03066 03067 this_pktl->next= *next_point; 03068 03069 s->streams[pkt->stream_index]->last_in_packet_buffer= 03070 *next_point= this_pktl; 03071 } 03072 03073 static int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt) 03074 { 03075 AVStream *st = s->streams[ pkt ->stream_index]; 03076 AVStream *st2= s->streams[ next->stream_index]; 03077 int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts, 03078 st->time_base); 03079 03080 if (comp == 0) 03081 return pkt->stream_index < next->stream_index; 03082 return comp > 0; 03083 } 03084 03085 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){ 03086 AVPacketList *pktl; 03087 int stream_count=0; 03088 int i; 03089 03090 if(pkt){ 03091 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts); 03092 } 03093 03094 for(i=0; i < s->nb_streams; i++) 03095 stream_count+= !!s->streams[i]->last_in_packet_buffer; 03096 03097 if(stream_count && (s->nb_streams == stream_count || flush)){ 03098 pktl= s->packet_buffer; 03099 *out= pktl->pkt; 03100 03101 s->packet_buffer= pktl->next; 03102 if(!s->packet_buffer) 03103 s->packet_buffer_end= NULL; 03104 03105 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl) 03106 s->streams[out->stream_index]->last_in_packet_buffer= NULL; 03107 av_freep(&pktl); 03108 return 1; 03109 }else{ 03110 av_init_packet(out); 03111 return 0; 03112 } 03113 } 03114 03124 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){ 03125 if(s->oformat->interleave_packet) 03126 return s->oformat->interleave_packet(s, out, in, flush); 03127 else 03128 return av_interleave_packet_per_dts(s, out, in, flush); 03129 } 03130 03131 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){ 03132 AVStream *st= s->streams[ pkt->stream_index]; 03133 int ret; 03134 03135 //FIXME/XXX/HACK drop zero sized packets 03136 if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0) 03137 return 0; 03138 03139 av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n", 03140 pkt->size, pkt->dts, pkt->pts); 03141 if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS)) 03142 return ret; 03143 03144 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS)) 03145 return AVERROR(EINVAL); 03146 03147 for(;;){ 03148 AVPacket opkt; 03149 int ret= av_interleave_packet(s, &opkt, pkt, 0); 03150 if(ret<=0) //FIXME cleanup needed for ret<0 ? 03151 return ret; 03152 03153 ret= s->oformat->write_packet(s, &opkt); 03154 03155 av_free_packet(&opkt); 03156 pkt= NULL; 03157 03158 if(ret<0) 03159 return ret; 03160 } 03161 } 03162 03163 int av_write_trailer(AVFormatContext *s) 03164 { 03165 int ret, i; 03166 03167 for(;;){ 03168 AVPacket pkt; 03169 ret= av_interleave_packet(s, &pkt, NULL, 1); 03170 if(ret<0) //FIXME cleanup needed for ret<0 ? 
03171 goto fail; 03172 if(!ret) 03173 break; 03174 03175 ret= s->oformat->write_packet(s, &pkt); 03176 03177 av_free_packet(&pkt); 03178 03179 if(ret<0) 03180 goto fail; 03181 } 03182 03183 if(s->oformat->write_trailer) 03184 ret = s->oformat->write_trailer(s); 03185 fail: 03186 for(i=0;i<s->nb_streams;i++) { 03187 av_freep(&s->streams[i]->priv_data); 03188 av_freep(&s->streams[i]->index_entries); 03189 } 03190 if (s->iformat && s->iformat->priv_class) 03191 av_opt_free(s->priv_data); 03192 av_freep(&s->priv_data); 03193 return ret; 03194 } 03195 03196 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx) 03197 { 03198 int i, j; 03199 AVProgram *program=NULL; 03200 void *tmp; 03201 03202 if (idx >= ac->nb_streams) { 03203 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx); 03204 return; 03205 } 03206 03207 for(i=0; i<ac->nb_programs; i++){ 03208 if(ac->programs[i]->id != progid) 03209 continue; 03210 program = ac->programs[i]; 03211 for(j=0; j<program->nb_stream_indexes; j++) 03212 if(program->stream_index[j] == idx) 03213 return; 03214 03215 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1)); 03216 if(!tmp) 03217 return; 03218 program->stream_index = tmp; 03219 program->stream_index[program->nb_stream_indexes++] = idx; 03220 return; 03221 } 03222 } 03223 03224 static void print_fps(double d, const char *postfix){ 03225 uint64_t v= lrintf(d*100); 03226 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix); 03227 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix); 03228 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix); 03229 } 03230 03231 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent) 03232 { 03233 if(m && !(m->count == 1 && av_dict_get(m, "language", NULL, 0))){ 03234 AVDictionaryEntry *tag=NULL; 03235 03236 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent); 03237 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) { 03238 if(strcmp("language", tag->key)) 03239 av_log(ctx, AV_LOG_INFO, "%s %-16s: %s\n", indent, tag->key, tag->value); 03240 } 03241 } 03242 } 03243 03244 /* "user interface" functions */ 03245 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output) 03246 { 03247 char buf[256]; 03248 int flags = (is_output ? 
ic->oformat->flags : ic->iformat->flags); 03249 AVStream *st = ic->streams[i]; 03250 int g = av_gcd(st->time_base.num, st->time_base.den); 03251 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0); 03252 avcodec_string(buf, sizeof(buf), st->codec, is_output); 03253 av_log(NULL, AV_LOG_INFO, " Stream #%d.%d", index, i); 03254 /* the pid is an important information, so we display it */ 03255 /* XXX: add a generic system */ 03256 if (flags & AVFMT_SHOW_IDS) 03257 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id); 03258 if (lang) 03259 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value); 03260 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g); 03261 av_log(NULL, AV_LOG_INFO, ": %s", buf); 03262 if (st->sample_aspect_ratio.num && // default 03263 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) { 03264 AVRational display_aspect_ratio; 03265 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, 03266 st->codec->width*st->sample_aspect_ratio.num, 03267 st->codec->height*st->sample_aspect_ratio.den, 03268 1024*1024); 03269 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d", 03270 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den, 03271 display_aspect_ratio.num, display_aspect_ratio.den); 03272 } 03273 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){ 03274 if(st->avg_frame_rate.den && st->avg_frame_rate.num) 03275 print_fps(av_q2d(st->avg_frame_rate), "fps"); 03276 if(st->r_frame_rate.den && st->r_frame_rate.num) 03277 print_fps(av_q2d(st->r_frame_rate), "tbr"); 03278 if(st->time_base.den && st->time_base.num) 03279 print_fps(1/av_q2d(st->time_base), "tbn"); 03280 if(st->codec->time_base.den && st->codec->time_base.num) 03281 print_fps(1/av_q2d(st->codec->time_base), "tbc"); 03282 } 03283 if (st->disposition & AV_DISPOSITION_DEFAULT) 03284 av_log(NULL, AV_LOG_INFO, " (default)"); 03285 if (st->disposition & AV_DISPOSITION_DUB) 03286 av_log(NULL, AV_LOG_INFO, " (dub)"); 03287 if (st->disposition & AV_DISPOSITION_ORIGINAL) 03288 av_log(NULL, AV_LOG_INFO, " (original)"); 03289 if (st->disposition & AV_DISPOSITION_COMMENT) 03290 av_log(NULL, AV_LOG_INFO, " (comment)"); 03291 if (st->disposition & AV_DISPOSITION_LYRICS) 03292 av_log(NULL, AV_LOG_INFO, " (lyrics)"); 03293 if (st->disposition & AV_DISPOSITION_KARAOKE) 03294 av_log(NULL, AV_LOG_INFO, " (karaoke)"); 03295 if (st->disposition & AV_DISPOSITION_FORCED) 03296 av_log(NULL, AV_LOG_INFO, " (forced)"); 03297 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED) 03298 av_log(NULL, AV_LOG_INFO, " (hearing impaired)"); 03299 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED) 03300 av_log(NULL, AV_LOG_INFO, " (visual impaired)"); 03301 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS) 03302 av_log(NULL, AV_LOG_INFO, " (clean effects)"); 03303 av_log(NULL, AV_LOG_INFO, "\n"); 03304 dump_metadata(NULL, st->metadata, " "); 03305 } 03306 03307 #if FF_API_DUMP_FORMAT 03308 void dump_format(AVFormatContext *ic, 03309 int index, 03310 const char *url, 03311 int is_output) 03312 { 03313 av_dump_format(ic, index, url, is_output); 03314 } 03315 #endif 03316 03317 void av_dump_format(AVFormatContext *ic, 03318 int index, 03319 const char *url, 03320 int is_output) 03321 { 03322 int i; 03323 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL; 03324 if (ic->nb_streams && !printed) 03325 return; 03326 03327 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n", 03328 is_output ? "Output" : "Input", 03329 index, 03330 is_output ? 
ic->oformat->name : ic->iformat->name, 03331 is_output ? "to" : "from", url); 03332 dump_metadata(NULL, ic->metadata, " "); 03333 if (!is_output) { 03334 av_log(NULL, AV_LOG_INFO, " Duration: "); 03335 if (ic->duration != AV_NOPTS_VALUE) { 03336 int hours, mins, secs, us; 03337 secs = ic->duration / AV_TIME_BASE; 03338 us = ic->duration % AV_TIME_BASE; 03339 mins = secs / 60; 03340 secs %= 60; 03341 hours = mins / 60; 03342 mins %= 60; 03343 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs, 03344 (100 * us) / AV_TIME_BASE); 03345 } else { 03346 av_log(NULL, AV_LOG_INFO, "N/A"); 03347 } 03348 if (ic->start_time != AV_NOPTS_VALUE) { 03349 int secs, us; 03350 av_log(NULL, AV_LOG_INFO, ", start: "); 03351 secs = ic->start_time / AV_TIME_BASE; 03352 us = abs(ic->start_time % AV_TIME_BASE); 03353 av_log(NULL, AV_LOG_INFO, "%d.%06d", 03354 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE)); 03355 } 03356 av_log(NULL, AV_LOG_INFO, ", bitrate: "); 03357 if (ic->bit_rate) { 03358 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000); 03359 } else { 03360 av_log(NULL, AV_LOG_INFO, "N/A"); 03361 } 03362 av_log(NULL, AV_LOG_INFO, "\n"); 03363 } 03364 for (i = 0; i < ic->nb_chapters; i++) { 03365 AVChapter *ch = ic->chapters[i]; 03366 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i); 03367 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base)); 03368 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base)); 03369 03370 dump_metadata(NULL, ch->metadata, " "); 03371 } 03372 if(ic->nb_programs) { 03373 int j, k, total = 0; 03374 for(j=0; j<ic->nb_programs; j++) { 03375 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata, 03376 "name", NULL, 0); 03377 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id, 03378 name ? 
name->value : ""); 03379 dump_metadata(NULL, ic->programs[j]->metadata, " "); 03380 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) { 03381 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output); 03382 printed[ic->programs[j]->stream_index[k]] = 1; 03383 } 03384 total += ic->programs[j]->nb_stream_indexes; 03385 } 03386 if (total < ic->nb_streams) 03387 av_log(NULL, AV_LOG_INFO, " No Program\n"); 03388 } 03389 for(i=0;i<ic->nb_streams;i++) 03390 if (!printed[i]) 03391 dump_stream_format(ic, i, index, is_output); 03392 03393 av_free(printed); 03394 } 03395 03396 int64_t av_gettime(void) 03397 { 03398 struct timeval tv; 03399 gettimeofday(&tv,NULL); 03400 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec; 03401 } 03402 03403 uint64_t ff_ntp_time(void) 03404 { 03405 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US; 03406 } 03407 03408 #if FF_API_PARSE_DATE 03409 #include "libavutil/parseutils.h" 03410 03411 int64_t parse_date(const char *timestr, int duration) 03412 { 03413 int64_t timeval; 03414 av_parse_time(&timeval, timestr, duration); 03415 return timeval; 03416 } 03417 #endif 03418 03419 #if FF_API_FIND_INFO_TAG 03420 #include "libavutil/parseutils.h" 03421 03422 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info) 03423 { 03424 return av_find_info_tag(arg, arg_size, tag1, info); 03425 } 03426 #endif 03427 03428 int av_get_frame_filename(char *buf, int buf_size, 03429 const char *path, int number) 03430 { 03431 const char *p; 03432 char *q, buf1[20], c; 03433 int nd, len, percentd_found; 03434 03435 q = buf; 03436 p = path; 03437 percentd_found = 0; 03438 for(;;) { 03439 c = *p++; 03440 if (c == '\0') 03441 break; 03442 if (c == '%') { 03443 do { 03444 nd = 0; 03445 while (isdigit(*p)) { 03446 nd = nd * 10 + *p++ - '0'; 03447 } 03448 c = *p++; 03449 } while (isdigit(c)); 03450 03451 switch(c) { 03452 case '%': 03453 goto addchar; 03454 case 'd': 03455 if (percentd_found) 03456 goto fail; 03457 percentd_found = 1; 03458 snprintf(buf1, sizeof(buf1), "%0*d", nd, number); 03459 len = strlen(buf1); 03460 if ((q - buf + len) > buf_size - 1) 03461 goto fail; 03462 memcpy(q, buf1, len); 03463 q += len; 03464 break; 03465 default: 03466 goto fail; 03467 } 03468 } else { 03469 addchar: 03470 if ((q - buf) < buf_size - 1) 03471 *q++ = c; 03472 } 03473 } 03474 if (!percentd_found) 03475 goto fail; 03476 *q = '\0'; 03477 return 0; 03478 fail: 03479 *q = '\0'; 03480 return -1; 03481 } 03482 03483 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size) 03484 { 03485 int len, i, j, c; 03486 #undef fprintf 03487 #define PRINT(...) 
do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0) 03488 03489 for(i=0;i<size;i+=16) { 03490 len = size - i; 03491 if (len > 16) 03492 len = 16; 03493 PRINT("%08x ", i); 03494 for(j=0;j<16;j++) { 03495 if (j < len) 03496 PRINT(" %02x", buf[i+j]); 03497 else 03498 PRINT(" "); 03499 } 03500 PRINT(" "); 03501 for(j=0;j<len;j++) { 03502 c = buf[i+j]; 03503 if (c < ' ' || c > '~') 03504 c = '.'; 03505 PRINT("%c", c); 03506 } 03507 PRINT("\n"); 03508 } 03509 #undef PRINT 03510 } 03511 03512 void av_hex_dump(FILE *f, uint8_t *buf, int size) 03513 { 03514 hex_dump_internal(NULL, f, 0, buf, size); 03515 } 03516 03517 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size) 03518 { 03519 hex_dump_internal(avcl, NULL, level, buf, size); 03520 } 03521 03522 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base) 03523 { 03524 #undef fprintf 03525 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0) 03526 PRINT("stream #%d:\n", pkt->stream_index); 03527 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0)); 03528 PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base)); 03529 /* DTS is _always_ valid after av_read_frame() */ 03530 PRINT(" dts="); 03531 if (pkt->dts == AV_NOPTS_VALUE) 03532 PRINT("N/A"); 03533 else 03534 PRINT("%0.3f", pkt->dts * av_q2d(time_base)); 03535 /* PTS may not be known if B-frames are present. */ 03536 PRINT(" pts="); 03537 if (pkt->pts == AV_NOPTS_VALUE) 03538 PRINT("N/A"); 03539 else 03540 PRINT("%0.3f", pkt->pts * av_q2d(time_base)); 03541 PRINT("\n"); 03542 PRINT(" size=%d\n", pkt->size); 03543 #undef PRINT 03544 if (dump_payload) 03545 av_hex_dump(f, pkt->data, pkt->size); 03546 } 03547 03548 #if FF_API_PKT_DUMP 03549 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload) 03550 { 03551 AVRational tb = { 1, AV_TIME_BASE }; 03552 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, tb); 03553 } 03554 #endif 03555 03556 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st) 03557 { 03558 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base); 03559 } 03560 03561 #if FF_API_PKT_DUMP 03562 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload) 03563 { 03564 AVRational tb = { 1, AV_TIME_BASE }; 03565 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, tb); 03566 } 03567 #endif 03568 03569 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload, 03570 AVStream *st) 03571 { 03572 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base); 03573 } 03574 03575 void av_url_split(char *proto, int proto_size, 03576 char *authorization, int authorization_size, 03577 char *hostname, int hostname_size, 03578 int *port_ptr, 03579 char *path, int path_size, 03580 const char *url) 03581 { 03582 const char *p, *ls, *at, *col, *brk; 03583 03584 if (port_ptr) *port_ptr = -1; 03585 if (proto_size > 0) proto[0] = 0; 03586 if (authorization_size > 0) authorization[0] = 0; 03587 if (hostname_size > 0) hostname[0] = 0; 03588 if (path_size > 0) path[0] = 0; 03589 03590 /* parse protocol */ 03591 if ((p = strchr(url, ':'))) { 03592 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url)); 03593 p++; /* skip ':' */ 03594 if (*p == '/') p++; 03595 if (*p == '/') p++; 03596 } else { 03597 /* no protocol means plain filename */ 03598 av_strlcpy(path, url, path_size); 03599 return; 03600 } 03601 03602 /* separate path from hostname */ 03603 ls = 
strchr(p, '/'); 03604 if(!ls) 03605 ls = strchr(p, '?'); 03606 if(ls) 03607 av_strlcpy(path, ls, path_size); 03608 else 03609 ls = &p[strlen(p)]; // XXX 03610 03611 /* the rest is hostname, use that to parse auth/port */ 03612 if (ls != p) { 03613 /* authorization (user[:pass]@hostname) */ 03614 if ((at = strchr(p, '@')) && at < ls) { 03615 av_strlcpy(authorization, p, 03616 FFMIN(authorization_size, at + 1 - p)); 03617 p = at + 1; /* skip '@' */ 03618 } 03619 03620 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) { 03621 /* [host]:port */ 03622 av_strlcpy(hostname, p + 1, 03623 FFMIN(hostname_size, brk - p)); 03624 if (brk[1] == ':' && port_ptr) 03625 *port_ptr = atoi(brk + 2); 03626 } else if ((col = strchr(p, ':')) && col < ls) { 03627 av_strlcpy(hostname, p, 03628 FFMIN(col + 1 - p, hostname_size)); 03629 if (port_ptr) *port_ptr = atoi(col + 1); 03630 } else 03631 av_strlcpy(hostname, p, 03632 FFMIN(ls + 1 - p, hostname_size)); 03633 } 03634 } 03635 03636 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase) 03637 { 03638 int i; 03639 static const char hex_table_uc[16] = { '0', '1', '2', '3', 03640 '4', '5', '6', '7', 03641 '8', '9', 'A', 'B', 03642 'C', 'D', 'E', 'F' }; 03643 static const char hex_table_lc[16] = { '0', '1', '2', '3', 03644 '4', '5', '6', '7', 03645 '8', '9', 'a', 'b', 03646 'c', 'd', 'e', 'f' }; 03647 const char *hex_table = lowercase ? hex_table_lc : hex_table_uc; 03648 03649 for(i = 0; i < s; i++) { 03650 buff[i * 2] = hex_table[src[i] >> 4]; 03651 buff[i * 2 + 1] = hex_table[src[i] & 0xF]; 03652 } 03653 03654 return buff; 03655 } 03656 03657 int ff_hex_to_data(uint8_t *data, const char *p) 03658 { 03659 int c, len, v; 03660 03661 len = 0; 03662 v = 1; 03663 for (;;) { 03664 p += strspn(p, SPACE_CHARS); 03665 if (*p == '\0') 03666 break; 03667 c = toupper((unsigned char) *p++); 03668 if (c >= '0' && c <= '9') 03669 c = c - '0'; 03670 else if (c >= 'A' && c <= 'F') 03671 c = c - 'A' + 10; 03672 else 03673 break; 03674 v = (v << 4) | c; 03675 if (v & 0x100) { 03676 if (data) 03677 data[len] = v; 03678 len++; 03679 v = 1; 03680 } 03681 } 03682 return len; 03683 } 03684 03685 void av_set_pts_info(AVStream *s, int pts_wrap_bits, 03686 unsigned int pts_num, unsigned int pts_den) 03687 { 03688 AVRational new_tb; 03689 if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){ 03690 if(new_tb.num != pts_num) 03691 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num); 03692 }else 03693 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index); 03694 03695 if(new_tb.num <= 0 || new_tb.den <= 0) { 03696 av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase for st:%d\n", s->index); 03697 return; 03698 } 03699 s->time_base = new_tb; 03700 s->pts_wrap_bits = pts_wrap_bits; 03701 } 03702 03703 int ff_url_join(char *str, int size, const char *proto, 03704 const char *authorization, const char *hostname, 03705 int port, const char *fmt, ...) 03706 { 03707 #if CONFIG_NETWORK 03708 struct addrinfo hints, *ai; 03709 #endif 03710 03711 str[0] = '\0'; 03712 if (proto) 03713 av_strlcatf(str, size, "%s://", proto); 03714 if (authorization && authorization[0]) 03715 av_strlcatf(str, size, "%s@", authorization); 03716 #if CONFIG_NETWORK && defined(AF_INET6) 03717 /* Determine if hostname is a numerical IPv6 address, 03718 * properly escape it within [] in that case. 
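 * (getaddrinfo() with AI_NUMERICHOST succeeds only for numeric addresses,
 * so this detects an IPv6 literal without triggering a DNS lookup; the
 * bracketed form is the literal-address syntax from RFC 2732.)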
int ff_url_join(char *str, int size, const char *proto,
                const char *authorization, const char *hostname,
                int port, const char *fmt, ...)
{
#if CONFIG_NETWORK
    struct addrinfo hints, *ai;
#endif

    str[0] = '\0';
    if (proto)
        av_strlcatf(str, size, "%s://", proto);
    if (authorization && authorization[0])
        av_strlcatf(str, size, "%s@", authorization);
#if CONFIG_NETWORK && defined(AF_INET6)
    /* Determine if hostname is a numerical IPv6 address,
     * properly escape it within [] in that case. */
    memset(&hints, 0, sizeof(hints));
    hints.ai_flags = AI_NUMERICHOST;
    if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
        if (ai->ai_family == AF_INET6) {
            av_strlcat(str, "[", size);
            av_strlcat(str, hostname, size);
            av_strlcat(str, "]", size);
        } else {
            av_strlcat(str, hostname, size);
        }
        freeaddrinfo(ai);
    } else
#endif
        /* Not an IPv6 address, just output the plain string. */
        av_strlcat(str, hostname, size);

    if (port >= 0)
        av_strlcatf(str, size, ":%d", port);
    if (fmt) {
        va_list vl;
        int len = strlen(str);

        va_start(vl, fmt);
        vsnprintf(str + len, size > len ? size - len : 0, fmt, vl);
        va_end(vl);
    }
    return strlen(str);
}

int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
                     AVFormatContext *src)
{
    AVPacket local_pkt;

    local_pkt = *pkt;
    local_pkt.stream_index = dst_stream;
    if (pkt->pts != AV_NOPTS_VALUE)
        local_pkt.pts = av_rescale_q(pkt->pts,
                                     src->streams[pkt->stream_index]->time_base,
                                     dst->streams[dst_stream]->time_base);
    if (pkt->dts != AV_NOPTS_VALUE)
        local_pkt.dts = av_rescale_q(pkt->dts,
                                     src->streams[pkt->stream_index]->time_base,
                                     dst->streams[dst_stream]->time_base);
    return av_write_frame(dst, &local_pkt);
}

void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
                        void *context)
{
    const char *ptr = str;

    /* Parse key=value pairs. */
    for (;;) {
        const char *key;
        char *dest = NULL, *dest_end;
        int key_len, dest_len = 0;

        /* Skip whitespace and potential commas. */
        while (*ptr && (isspace(*ptr) || *ptr == ','))
            ptr++;
        if (!*ptr)
            break;

        key = ptr;

        if (!(ptr = strchr(key, '=')))
            break;
        ptr++;
        key_len = ptr - key;

        callback_get_buf(context, key, key_len, &dest, &dest_len);
        dest_end = dest + dest_len - 1;

        if (*ptr == '\"') {
            ptr++;
            while (*ptr && *ptr != '\"') {
                if (*ptr == '\\') {
                    if (!ptr[1])
                        break;
                    if (dest && dest < dest_end)
                        *dest++ = ptr[1];
                    ptr += 2;
                } else {
                    if (dest && dest < dest_end)
                        *dest++ = *ptr;
                    ptr++;
                }
            }
            if (*ptr == '\"')
                ptr++;
        } else {
            for (; *ptr && !(isspace(*ptr) || *ptr == ','); ptr++)
                if (dest && dest < dest_end)
                    *dest++ = *ptr;
        }
        if (dest)
            *dest = 0;
    }
}

int ff_find_stream_index(AVFormatContext *s, int id)
{
    int i;
    for (i = 0; i < s->nb_streams; i++) {
        if (s->streams[i]->id == id)
            return i;
    }
    return -1;
}
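/* A minimal sketch of the ff_parse_key_value() callback protocol: the parser
 * hands each key to the callback (key_len includes the trailing '='), the
 * callback returns a destination buffer via *dest / *dest_len, and the parser
 * then copies the value there and NUL-terminates it. The struct, field and
 * function names below are illustrative assumptions, not libavformat API.
 */
struct example_kv {
    char title[64];
    char author[64];
};

static void example_kv_cb(void *context, const char *key, int key_len,
                          char **dest, int *dest_len)
{
    struct example_kv *kv = context;

    if (!strncmp(key, "title=", key_len)) {
        *dest     = kv->title;
        *dest_len = sizeof(kv->title);
    } else if (!strncmp(key, "author=", key_len)) {
        *dest     = kv->author;
        *dest_len = sizeof(kv->author);
    }
    /* Unknown keys leave *dest NULL, so their values are skipped. */
}

/* ff_parse_key_value("title=\"Some title\", author=someone",
 *                    example_kv_cb, &kv)
 * would fill kv.title and kv.author. */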
void ff_make_absolute_url(char *buf, int size, const char *base,
                          const char *rel)
{
    char *sep;
    /* Absolute path, relative to the current server */
    if (base && strstr(base, "://") && rel[0] == '/') {
        if (base != buf)
            av_strlcpy(buf, base, size);
        sep = strstr(buf, "://");
        if (sep) {
            sep += 3;
            sep = strchr(sep, '/');
            if (sep)
                *sep = '\0';
        }
        av_strlcat(buf, rel, size);
        return;
    }
    /* If rel actually is an absolute url, just copy it */
    if (!base || strstr(rel, "://") || rel[0] == '/') {
        av_strlcpy(buf, rel, size);
        return;
    }
    if (base != buf)
        av_strlcpy(buf, base, size);
    /* Remove the file name from the base url */
    sep = strrchr(buf, '/');
    if (sep)
        sep[1] = '\0';
    else
        buf[0] = '\0';
    while (av_strstart(rel, "../", NULL) && sep) {
        /* Remove the path delimiter at the end */
        sep[0] = '\0';
        sep = strrchr(buf, '/');
        /* If the next directory name to pop off is "..", break here */
        if (!strcmp(sep ? &sep[1] : buf, "..")) {
            /* Readd the slash we just removed */
            av_strlcat(buf, "/", size);
            break;
        }
        /* Cut off the directory name */
        if (sep)
            sep[1] = '\0';
        else
            buf[0] = '\0';
        rel += 3;
    }
    av_strlcat(buf, rel, size);
}
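/* A minimal sketch of how ff_make_absolute_url() resolves references; the
 * helper name, buffer size and example URLs are illustrative assumptions.
 */
static void absolute_url_examples(void)
{
    char buf[1024];

    /* A relative path replaces the last component of the base URL. */
    ff_make_absolute_url(buf, sizeof(buf), "http://server/dir/a.m3u8", "b.ts");
    /* buf == "http://server/dir/b.ts" */

    /* "../" segments pop directories off the base path. */
    ff_make_absolute_url(buf, sizeof(buf), "http://server/dir/sub/a.m3u8", "../b.ts");
    /* buf == "http://server/dir/b.ts" */

    /* An absolute path keeps only the scheme and host of the base. */
    ff_make_absolute_url(buf, sizeof(buf), "http://server/dir/a.m3u8", "/other/b.ts");
    /* buf == "http://server/other/b.ts" */

    /* A full URL is copied as-is. */
    ff_make_absolute_url(buf, sizeof(buf), "http://server/dir/a.m3u8", "https://other/c.ts");
    /* buf == "https://other/c.ts" */
}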