avconv.c
1 /*
2  * avconv main
3  * Copyright (c) 2000-2011 The libav developers.
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "config.h"
23 #include <ctype.h>
24 #include <string.h>
25 #include <math.h>
26 #include <stdlib.h>
27 #include <errno.h>
28 #include <signal.h>
29 #include <limits.h>
30 #include <unistd.h>
31 #include "libavformat/avformat.h"
32 #include "libavdevice/avdevice.h"
33 #include "libswscale/swscale.h"
34 #include "libavutil/opt.h"
36 #include "libavutil/audioconvert.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/colorspace.h"
40 #include "libavutil/fifo.h"
41 #include "libavutil/intreadwrite.h"
42 #include "libavutil/dict.h"
43 #include "libavutil/mathematics.h"
44 #include "libavutil/pixdesc.h"
45 #include "libavutil/avstring.h"
46 #include "libavutil/libm.h"
47 #include "libavutil/imgutils.h"
48 #include "libavformat/os_support.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avfilter.h"
53 # include "libavfilter/buffersrc.h"
54 # include "libavfilter/vsrc_buffer.h"
55 #endif
56 
57 #if HAVE_SYS_RESOURCE_H
58 #include <sys/types.h>
59 #include <sys/time.h>
60 #include <sys/resource.h>
61 #elif HAVE_GETPROCESSTIMES
62 #include <windows.h>
63 #endif
64 #if HAVE_GETPROCESSMEMORYINFO
65 #include <windows.h>
66 #include <psapi.h>
67 #endif
68 
69 #if HAVE_SYS_SELECT_H
70 #include <sys/select.h>
71 #endif
72 
73 #include <time.h>
74 
75 #include "cmdutils.h"
76 
77 #include "libavutil/avassert.h"
78 
79 #define VSYNC_AUTO -1
80 #define VSYNC_PASSTHROUGH 0
81 #define VSYNC_CFR 1
82 #define VSYNC_VFR 2
83 
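/* Frame-rate conversion modes selected with -vsync: AUTO lets the output format
 * flags decide, PASSTHROUGH forwards every frame with its original timestamp,
 * CFR duplicates or drops frames to hold a constant output rate, and VFR only
 * drops frames whose timestamps would collide. The actual dup/drop decision is
 * the vdelta logic in do_video_out() below. */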
84 const char program_name[] = "avconv";
85 const int program_birth_year = 2000;
86 
87 /* select an input stream for an output stream */
88 typedef struct StreamMap {
89  int disabled;
94 } StreamMap;
95 
99 typedef struct MetadataMap {
100  int file;
101  char type;
102  int index;
103 } MetadataMap;
104 
105 static const OptionDef options[];
106 
107 static int video_discard = 0;
108 static int same_quant = 0;
109 static int do_deinterlace = 0;
110 static int intra_dc_precision = 8;
111 static int qp_hist = 0;
112 
113 static int file_overwrite = 0;
114 static int do_benchmark = 0;
115 static int do_hex_dump = 0;
116 static int do_pkt_dump = 0;
117 static int do_pass = 0;
119 static int video_sync_method = VSYNC_AUTO;
120 static int audio_sync_method = 0;
121 static float audio_drift_threshold = 0.1;
122 static int copy_ts = 0;
123 static int copy_tb = 1;
124 static int opt_shortest = 0;
125 static char *vstats_filename;
126 static FILE *vstats_file;
127 
128 static int audio_volume = 256;
129 
130 static int exit_on_error = 0;
131 static int using_stdin = 0;
132 static int64_t video_size = 0;
133 static int64_t audio_size = 0;
134 static int64_t extra_size = 0;
135 static int nb_frames_dup = 0;
136 static int nb_frames_drop = 0;
137 static int input_sync;
138 
139 static float dts_delta_threshold = 10;
140 
141 static int print_stats = 1;
142 
143 static uint8_t *audio_buf;
144 static unsigned int allocated_audio_buf_size;
145 
146 #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
147 
148 typedef struct FrameBuffer {
149  uint8_t *base[4];
150  uint8_t *data[4];
151  int linesize[4];
152 
153  int h, w;
154  enum PixelFormat pix_fmt;
155 
156  int refcount;
157  struct InputStream *ist;
158  struct FrameBuffer *next;
159 } FrameBuffer;
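/* FrameBuffer implements a simple free list of reference-counted picture
 * buffers handed to decoders through codec_get_buffer()/codec_release_buffer()
 * below: a released buffer is pushed back onto ist->buffer_pool instead of
 * being freed, so a decoder that keeps reference frames does not force a fresh
 * allocation for every frame. */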
160 
161 typedef struct InputStream {
164  int discard; /* true if stream data should be discarded */
165  int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
169 
170  int64_t start; /* time when read started */
171  int64_t next_pts; /* synthetic pts for cases where pkt.pts
172  is not defined */
173  int64_t pts; /* current pts */
175  double ts_scale;
176  int is_start; /* is 1 at the start and after a discontinuity */
179 
180  /* a pool of free buffers for decoded data */
181  FrameBuffer *buffer_pool;
182 } InputStream;
183 
184 typedef struct InputFile {
185  AVFormatContext *ctx;
186  int eof_reached; /* true if eof reached */
187  int ist_index; /* index of first stream in ist_table */
188  int buffer_size; /* current total buffer size */
189  int64_t ts_offset;
190  int nb_streams; /* number of stream that avconv is aware of; may be different
191  from ctx.nb_streams if new streams appear during av_read_frame() */
192  int rate_emu;
193 } InputFile;
194 
195 typedef struct OutputStream {
196  int file_index; /* file index */
197  int index; /* stream index in the output file */
198  int source_index; /* InputStream index */
199  AVStream *st; /* stream in the output file */
200  int encoding_needed; /* true if encoding needed for this stream */
202  /* input pts and corresponding output pts
203  for A/V sync */
204  // double sync_ipts; /* dts from the AVPacket of the demuxer in second units */
205  struct InputStream *sync_ist; /* input stream to sync against */
206  int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
209  int64_t max_frames;
211 
212  /* video only */
214  AVFrame pict_tmp; /* temporary image for resampling */
215  struct SwsContext *img_resample_ctx; /* for image resampling */
222 
224 
225  /* forced key frames */
226  int64_t *forced_kf_pts;
230 
231  /* audio only */
233  ReSampleContext *resample; /* for audio resampling */
239  AVFifoBuffer *fifo; /* for compression: one audio fifo per codec */
240  FILE *logfile;
241 
242 #if CONFIG_AVFILTER
243  AVFilterContext *output_video_filter;
244  AVFilterContext *input_video_filter;
245  AVFilterBufferRef *picref;
246  char *avfilter;
247  AVFilterGraph *graph;
248 #endif
249 
250  int64_t sws_flags;
254  const char *attachment_filename;
256 } OutputStream;
257 
258 
259 typedef struct OutputFile {
260  AVFormatContext *ctx;
261  AVDictionary *opts;
262  int ost_index; /* index of the first stream in output_streams */
263  int64_t recording_time; /* desired length of the resulting file in microseconds */
264  int64_t start_time; /* start time in microseconds */
265  uint64_t limit_filesize;
266 } OutputFile;
267 
268 static InputStream *input_streams = NULL;
269 static int nb_input_streams = 0;
270 static InputFile *input_files = NULL;
271 static int nb_input_files = 0;
272 
273 static OutputStream *output_streams = NULL;
274 static int nb_output_streams = 0;
275 static OutputFile *output_files = NULL;
276 static int nb_output_files = 0;
277 
278 typedef struct OptionsContext {
279  /* input/output options */
280  int64_t start_time;
281  const char *format;
282 
295 
296  /* input options */
298  int rate_emu;
299 
304 
305  /* output options */
308  /* first item specifies output metadata, second is input */
314  const char **attachments;
316 
318 
319  int64_t recording_time;
320  uint64_t limit_filesize;
321  float mux_preload;
323 
328 
329  /* indexed by output file stream index */
332 
365 #if CONFIG_AVFILTER
367  int nb_filters;
368 #endif
370 
371 #define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\
372 {\
373  int i, ret;\
374  for (i = 0; i < o->nb_ ## name; i++) {\
375  char *spec = o->name[i].specifier;\
376  if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0)\
377  outvar = o->name[i].u.type;\
378  else if (ret < 0)\
379  exit_program(1);\
380  }\
381 }
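/* Hypothetical usage sketch (the option name "codec_names" is only assumed
 * here): pick the last -name[:spec] value whose stream specifier matches the
 * stream 'st' of the format context 'oc'. The 'str' argument selects the
 * matching member of the SpecifierOpt union, a malformed specifier aborts the
 * program, and the macro relies on an OptionsContext pointer named 'o' being
 * in scope.
 *
 *     char *codec_name = NULL;
 *     MATCH_PER_STREAM_OPT(codec_names, str, codec_name, oc, st);
 */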
382 
383 static void reset_options(OptionsContext *o)
384 {
385  const OptionDef *po = options;
386 
387  /* all OPT_SPEC and OPT_STRING can be freed in generic way */
388  while (po->name) {
389  void *dst = (uint8_t*)o + po->u.off;
390 
391  if (po->flags & OPT_SPEC) {
392  SpecifierOpt **so = dst;
393  int i, *count = (int*)(so + 1);
394  for (i = 0; i < *count; i++) {
395  av_freep(&(*so)[i].specifier);
396  if (po->flags & OPT_STRING)
397  av_freep(&(*so)[i].u.str);
398  }
399  av_freep(so);
400  *count = 0;
401  } else if (po->flags & OPT_OFFSET && po->flags & OPT_STRING)
402  av_freep(dst);
403  po++;
404  }
405 
406  av_freep(&o->stream_maps);
408  av_freep(&o->streamid_map);
409 
410  memset(o, 0, sizeof(*o));
411 
412  o->mux_max_delay = 0.7;
415  o->chapters_input_file = INT_MAX;
416 
417  uninit_opts();
418  init_opts();
419 }
420 
421 static int alloc_buffer(InputStream *ist, FrameBuffer **pbuf)
422 {
423  AVCodecContext *s = ist->st->codec;
424  FrameBuffer *buf = av_mallocz(sizeof(*buf));
425  int ret;
426  const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
427  int h_chroma_shift, v_chroma_shift;
428  int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
429  int w = s->width, h = s->height;
430 
431  if (!buf)
432  return AVERROR(ENOMEM);
433 
434  if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
435  w += 2*edge;
436  h += 2*edge;
437  }
438 
439  avcodec_align_dimensions(s, &w, &h);
440  if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
441  s->pix_fmt, 32)) < 0) {
442  av_freep(&buf);
443  return ret;
444  }
445  /* XXX this shouldn't be needed, but some tests break without this line
446  * those decoders are buggy and need to be fixed.
447  * the following tests fail:
448  * bethsoft-vid, cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
449  */
450  memset(buf->base[0], 128, ret);
451 
452  avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
453  for (int i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
454  const int h_shift = i==0 ? 0 : h_chroma_shift;
455  const int v_shift = i==0 ? 0 : v_chroma_shift;
456  if (s->flags & CODEC_FLAG_EMU_EDGE)
457  buf->data[i] = buf->base[i];
458  else if (buf->base[i])
459  buf->data[i] = buf->base[i] +
460  FFALIGN((buf->linesize[i]*edge >> v_shift) +
461  (pixel_size*edge >> h_shift), 32);
462  }
463  buf->w = s->width;
464  buf->h = s->height;
465  buf->pix_fmt = s->pix_fmt;
466  buf->ist = ist;
467 
468  *pbuf = buf;
469  return 0;
470 }
471 
472 static void free_buffer_pool(InputStream *ist)
473 {
474  FrameBuffer *buf = ist->buffer_pool;
475  while (buf) {
476  ist->buffer_pool = buf->next;
477  av_freep(&buf->base[0]);
478  av_free(buf);
479  buf = ist->buffer_pool;
480  }
481 }
482 
483 static void unref_buffer(InputStream *ist, FrameBuffer *buf)
484 {
485  av_assert0(buf->refcount);
486  buf->refcount--;
487  if (!buf->refcount) {
488  buf->next = ist->buffer_pool;
489  ist->buffer_pool = buf;
490  }
491 }
492 
493 static int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
494 {
495  InputStream *ist = s->opaque;
496  FrameBuffer *buf;
497  int ret, i;
498 
499  if (!ist->buffer_pool && (ret = alloc_buffer(ist, &ist->buffer_pool)) < 0)
500  return ret;
501 
502  buf = ist->buffer_pool;
503  ist->buffer_pool = buf->next;
504  buf->next = NULL;
505  if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
506  av_freep(&buf->base[0]);
507  av_free(buf);
508  if ((ret = alloc_buffer(ist, &buf)) < 0)
509  return ret;
510  }
511  buf->refcount++;
512 
513  frame->opaque = buf;
514  frame->type = FF_BUFFER_TYPE_USER;
515  frame->extended_data = frame->data;
516  frame->pkt_pts = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
517 
518  for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
519  frame->base[i] = buf->base[i]; // XXX h264.c uses base though it shouldn't
520  frame->data[i] = buf->data[i];
521  frame->linesize[i] = buf->linesize[i];
522  }
523 
524  return 0;
525 }
526 
527 static void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
528 {
529  InputStream *ist = s->opaque;
530  FrameBuffer *buf = frame->opaque;
531  int i;
532 
533  for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
534  frame->data[i] = NULL;
535 
536  unref_buffer(ist, buf);
537 }
538 
539 static void filter_release_buffer(AVFilterBuffer *fb)
540 {
541  FrameBuffer *buf = fb->priv;
542  av_free(fb);
543  unref_buffer(buf->ist, buf);
544 }
545 
546 #if CONFIG_AVFILTER
547 
548 static int configure_video_filters(InputStream *ist, OutputStream *ost)
549 {
550  AVFilterContext *last_filter, *filter;
552  AVCodecContext *codec = ost->st->codec;
553  AVCodecContext *icodec = ist->st->codec;
554  AVSinkContext avsink_ctx = { .pix_fmt = codec->pix_fmt };
555  AVRational sample_aspect_ratio;
556  char args[255];
557  int ret;
558 
559  ost->graph = avfilter_graph_alloc();
560 
561  if (ist->st->sample_aspect_ratio.num) {
562  sample_aspect_ratio = ist->st->sample_aspect_ratio;
563  } else
564  sample_aspect_ratio = ist->st->codec->sample_aspect_ratio;
565 
566  snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
567  ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE,
568  sample_aspect_ratio.num, sample_aspect_ratio.den);
569 
570  ret = avfilter_graph_create_filter(&ost->input_video_filter, avfilter_get_by_name("buffer"),
571  "src", args, NULL, ost->graph);
572  if (ret < 0)
573  return ret;
574  ret = avfilter_graph_create_filter(&ost->output_video_filter, &avsink,
575  "out", NULL, &avsink_ctx, ost->graph);
576  if (ret < 0)
577  return ret;
578  last_filter = ost->input_video_filter;
579 
580  if (codec->width != icodec->width || codec->height != icodec->height) {
581  snprintf(args, 255, "%d:%d:flags=0x%X",
582  codec->width,
583  codec->height,
584  (unsigned)ost->sws_flags);
585  if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
586  NULL, args, NULL, ost->graph)) < 0)
587  return ret;
588  if ((ret = avfilter_link(last_filter, 0, filter, 0)) < 0)
589  return ret;
590  last_filter = filter;
591  }
592 
593  snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags);
594  ost->graph->scale_sws_opts = av_strdup(args);
595 
596  if (ost->avfilter) {
597  AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
598  AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut));
599 
600  outputs->name = av_strdup("in");
601  outputs->filter_ctx = last_filter;
602  outputs->pad_idx = 0;
603  outputs->next = NULL;
604 
605  inputs->name = av_strdup("out");
606  inputs->filter_ctx = ost->output_video_filter;
607  inputs->pad_idx = 0;
608  inputs->next = NULL;
609 
610  if ((ret = avfilter_graph_parse(ost->graph, ost->avfilter, inputs, outputs, NULL)) < 0)
611  return ret;
612  } else {
613  if ((ret = avfilter_link(last_filter, 0, ost->output_video_filter, 0)) < 0)
614  return ret;
615  }
616 
617  if ((ret = avfilter_graph_config(ost->graph, NULL)) < 0)
618  return ret;
619 
620  codec->width = ost->output_video_filter->inputs[0]->w;
621  codec->height = ost->output_video_filter->inputs[0]->h;
622  codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
623  ost->frame_aspect_ratio ? // overridden by the -aspect cli option
624  av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
625  ost->output_video_filter->inputs[0]->sample_aspect_ratio;
626 
627  return 0;
628 }
629 #endif /* CONFIG_AVFILTER */
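/* The graph built above is, conceptually:
 *
 *     buffer source "src" -> [scale, only if output size differs]
 *         -> user chain given with -vf (ost->avfilter) -> sink "out"
 *
 * and the encoder dimensions and sample aspect ratio are then taken from the
 * sink's input pad once the graph is configured. */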
630 
631 static void term_exit(void)
632 {
633  av_log(NULL, AV_LOG_QUIET, "");
634 }
635 
636 static volatile int received_sigterm = 0;
637 static volatile int received_nb_signals = 0;
638 
639 static void
640 sigterm_handler(int sig)
641 {
642  received_sigterm = sig;
643  received_nb_signals++;
644  term_exit();
645 }
646 
647 static void term_init(void)
648 {
649  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
650  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
651 #ifdef SIGXCPU
652  signal(SIGXCPU, sigterm_handler);
653 #endif
654 }
655 
656 static int decode_interrupt_cb(void *ctx)
657 {
658  return received_nb_signals > 1;
659 }
660 
662 
663 void exit_program(int ret)
664 {
665  int i;
666 
667  /* close files */
668  for (i = 0; i < nb_output_files; i++) {
669  AVFormatContext *s = output_files[i].ctx;
670  if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
671  avio_close(s->pb);
672  avformat_free_context(s);
673  av_dict_free(&output_files[i].opts);
674  }
675  for (i = 0; i < nb_output_streams; i++) {
676  AVBitStreamFilterContext *bsfc = output_streams[i].bitstream_filters;
677  while (bsfc) {
678  AVBitStreamFilterContext *next = bsfc->next;
679  av_bitstream_filter_close(bsfc);
680  bsfc = next;
681  }
682  output_streams[i].bitstream_filters = NULL;
683 
684  if (output_streams[i].output_frame) {
685  AVFrame *frame = output_streams[i].output_frame;
686  if (frame->extended_data != frame->data)
687  av_freep(&frame->extended_data);
688  av_freep(&frame);
689  }
690 
691  av_freep(&output_streams[i].forced_keyframes);
692 #if CONFIG_AVFILTER
693  av_freep(&output_streams[i].avfilter);
694 #endif
695  }
696  for (i = 0; i < nb_input_files; i++) {
697  avformat_close_input(&input_files[i].ctx);
698  }
699  for (i = 0; i < nb_input_streams; i++) {
700  av_freep(&input_streams[i].decoded_frame);
701  av_freep(&input_streams[i].filtered_frame);
702  av_dict_free(&input_streams[i].opts);
703  free_buffer_pool(&input_streams[i]);
704  }
705 
706  if (vstats_file)
707  fclose(vstats_file);
709 
710  av_freep(&input_streams);
711  av_freep(&input_files);
712  av_freep(&output_streams);
713  av_freep(&output_files);
714 
715  uninit_opts();
718 
719 #if CONFIG_AVFILTER
720  avfilter_uninit();
721 #endif
723 
724  if (received_sigterm) {
725  av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
726  (int) received_sigterm);
727  exit (255);
728  }
729 
730  exit(ret);
731 }
732 
733 static void assert_avoptions(AVDictionary *m)
734 {
735  AVDictionaryEntry *t;
736  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
737  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
738  exit_program(1);
739  }
740 }
741 
742 static void assert_codec_experimental(AVCodecContext *c, int encoder)
743 {
744  const char *codec_string = encoder ? "encoder" : "decoder";
745  AVCodec *codec;
746  if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
747  c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
748  av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
749  "results.\nAdd '-strict experimental' if you want to use it.\n",
750  codec_string, c->codec->name);
751  codec = encoder ? avcodec_find_encoder(c->codec->id) : avcodec_find_decoder(c->codec->id);
752  if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
753  av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
754  codec_string, codec->name);
755  exit_program(1);
756  }
757 }
758 
759 static void choose_sample_fmt(AVStream *st, AVCodec *codec)
760 {
761  if (codec && codec->sample_fmts) {
762  const enum AVSampleFormat *p = codec->sample_fmts;
763  for (; *p != -1; p++) {
764  if (*p == st->codec->sample_fmt)
765  break;
766  }
767  if (*p == -1) {
768  av_log(NULL, AV_LOG_WARNING,
769  "Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
770  av_get_sample_fmt_name(st->codec->sample_fmt),
771  codec->name,
772  av_get_sample_fmt_name(codec->sample_fmts[0]));
773  st->codec->sample_fmt = codec->sample_fmts[0];
774  }
775  }
776 }
777 
785 static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
786  AVCodecContext *enc)
787 {
788  /* if sample formats match or a decoder sample format has already been
789  requested, just return */
790  if (enc->sample_fmt == dec->sample_fmt ||
791  dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
792  return;
793 
794  /* if decoder supports more than one output format */
795  if (dec_codec && dec_codec->sample_fmts &&
796  dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
797  dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
798  const enum AVSampleFormat *p;
799  int min_dec = -1, min_inc = -1;
800 
801  /* find a matching sample format in the encoder */
802  for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
803  if (*p == enc->sample_fmt) {
804  dec->request_sample_fmt = *p;
805  return;
806  } else if (*p > enc->sample_fmt) {
807  min_inc = FFMIN(min_inc, *p - enc->sample_fmt);
808  } else
809  min_dec = FFMIN(min_dec, enc->sample_fmt - *p);
810  }
811 
812  /* if none match, provide the one that matches quality closest */
813  dec->request_sample_fmt = min_inc > 0 ? enc->sample_fmt + min_inc :
814  enc->sample_fmt - min_dec;
815  }
816 }
817 
818 static void choose_sample_rate(AVStream *st, AVCodec *codec)
819 {
820  if (codec && codec->supported_samplerates) {
821  const int *p = codec->supported_samplerates;
822  int best = 0;
823  int best_dist = INT_MAX;
824  for (; *p; p++) {
825  int dist = abs(st->codec->sample_rate - *p);
826  if (dist < best_dist) {
827  best_dist = dist;
828  best = *p;
829  }
830  }
831  if (best_dist) {
832  av_log(st->codec, AV_LOG_WARNING, "Requested sampling rate unsupported using closest supported (%d)\n", best);
833  }
834  st->codec->sample_rate = best;
835  }
836 }
837 
838 static void choose_pixel_fmt(AVStream *st, AVCodec *codec)
839 {
840  if (codec && codec->pix_fmts) {
841  const enum PixelFormat *p = codec->pix_fmts;
843  if (st->codec->codec_id == CODEC_ID_MJPEG) {
845  } else if (st->codec->codec_id == CODEC_ID_LJPEG) {
848  }
849  }
850  for (; *p != PIX_FMT_NONE; p++) {
851  if (*p == st->codec->pix_fmt)
852  break;
853  }
854  if (*p == PIX_FMT_NONE) {
855  if (st->codec->pix_fmt != PIX_FMT_NONE)
856  av_log(NULL, AV_LOG_WARNING,
857  "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
858  av_get_pix_fmt_name(st->codec->pix_fmt),
859  codec->name,
860  av_get_pix_fmt_name(codec->pix_fmts[0]));
861  st->codec->pix_fmt = codec->pix_fmts[0];
862  }
863  }
864 }
865 
866 static double
867 get_sync_ipts(const OutputStream *ost)
868 {
869  const InputStream *ist = ost->sync_ist;
870  OutputFile *of = &output_files[ost->file_index];
871  return (double)(ist->pts - of->start_time) / AV_TIME_BASE;
872 }
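/* get_sync_ipts() returns the input timestamp of the sync stream in seconds,
 * relative to the output file's start time. do_audio_out() and do_video_out()
 * compare it against ost->sync_opts (a counter in output samples or frames) to
 * decide how much to pad, duplicate or drop. */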
873 
874 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
875 {
876  AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
877  AVCodecContext *avctx = ost->st->codec;
878  int ret;
879 
880  /*
881  * Audio encoders may split the packets -- #frames in != #packets out.
882  * But there is no reordering, so we can limit the number of output packets
883  * by simply dropping them here.
884  * Counting encoded video frames needs to be done separately because of
885  * reordering, see do_video_out()
886  */
887  if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
888  if (ost->frame_number >= ost->max_frames)
889  return;
890  ost->frame_number++;
891  }
892 
893  while (bsfc) {
894  AVPacket new_pkt = *pkt;
895  int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
896  &new_pkt.data, &new_pkt.size,
897  pkt->data, pkt->size,
898  pkt->flags & AV_PKT_FLAG_KEY);
899  if (a > 0) {
900  av_free_packet(pkt);
901  new_pkt.destruct = av_destruct_packet;
902  } else if (a < 0) {
903  av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
904  bsfc->filter->name, pkt->stream_index,
905  avctx->codec ? avctx->codec->name : "copy");
906  print_error("", a);
907  if (exit_on_error)
908  exit_program(1);
909  }
910  *pkt = new_pkt;
911 
912  bsfc = bsfc->next;
913  }
914 
915  ret = av_interleaved_write_frame(s, pkt);
916  if (ret < 0) {
917  print_error("av_interleaved_write_frame()", ret);
918  exit_program(1);
919  }
920 }
921 
922 static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_t size)
923 {
924  int fill_char = 0x00;
925  if (sample_fmt == AV_SAMPLE_FMT_U8)
926  fill_char = 0x80;
927  memset(buf, fill_char, size);
928 }
929 
930 static int encode_audio_frame(AVFormatContext *s, OutputStream *ost,
931  const uint8_t *buf, int buf_size)
932 {
933  AVCodecContext *enc = ost->st->codec;
934  AVFrame *frame = NULL;
935  AVPacket pkt;
936  int ret, got_packet;
937 
938  av_init_packet(&pkt);
939  pkt.data = NULL;
940  pkt.size = 0;
941 
942  if (buf) {
943  if (!ost->output_frame) {
944  ost->output_frame = avcodec_alloc_frame();
945  if (!ost->output_frame) {
946  av_log(NULL, AV_LOG_FATAL, "out-of-memory in encode_audio_frame()\n");
947  exit_program(1);
948  }
949  }
950  frame = ost->output_frame;
951  if (frame->extended_data != frame->data)
952  av_freep(&frame->extended_data);
954 
955  frame->nb_samples = buf_size /
956  (enc->channels * av_get_bytes_per_sample(enc->sample_fmt));
957  if ((ret = avcodec_fill_audio_frame(frame, enc->channels, enc->sample_fmt,
958  buf, buf_size, 1)) < 0) {
959  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
960  exit_program(1);
961  }
962  }
963 
964  got_packet = 0;
965  if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
966  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
967  exit_program(1);
968  }
969 
970  if (got_packet) {
971  pkt.stream_index = ost->index;
972  if (pkt.pts != AV_NOPTS_VALUE)
973  pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
974  if (pkt.duration > 0)
975  pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
976 
977  write_frame(s, &pkt, ost);
978 
979  audio_size += pkt.size;
980  }
981 
982  if (frame)
983  ost->sync_opts += frame->nb_samples;
984 
985  return pkt.size;
986 }
987 
988 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
989  InputStream *ist, AVFrame *decoded_frame)
990 {
991  uint8_t *buftmp;
992  int64_t audio_buf_size;
993 
994  int size_out, frame_bytes, resample_changed;
995  AVCodecContext *enc = ost->st->codec;
996  AVCodecContext *dec = ist->st->codec;
997  int osize = av_get_bytes_per_sample(enc->sample_fmt);
998  int isize = av_get_bytes_per_sample(dec->sample_fmt);
999  uint8_t *buf = decoded_frame->data[0];
1000  int size = decoded_frame->nb_samples * dec->channels * isize;
1001  int64_t allocated_for_size = size;
1002 
1003 need_realloc:
1004  audio_buf_size = (allocated_for_size + isize * dec->channels - 1) / (isize * dec->channels);
1005  audio_buf_size = (audio_buf_size * enc->sample_rate + dec->sample_rate) / dec->sample_rate;
1006  audio_buf_size = audio_buf_size * 2 + 10000; // safety factors for the deprecated resampling API
1007  audio_buf_size = FFMAX(audio_buf_size, enc->frame_size);
1008  audio_buf_size *= osize * enc->channels;
1009 
1010  if (audio_buf_size > INT_MAX) {
1011  av_log(NULL, AV_LOG_FATAL, "Buffer sizes too large\n");
1012  exit_program(1);
1013  }
1014 
1015  av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size);
1016  if (!audio_buf) {
1017  av_log(NULL, AV_LOG_FATAL, "Out of memory in do_audio_out\n");
1018  exit_program(1);
1019  }
1020 
1021  if (enc->channels != dec->channels || enc->sample_rate != dec->sample_rate)
1022  ost->audio_resample = 1;
1023 
1024  resample_changed = ost->resample_sample_fmt != dec->sample_fmt ||
1025  ost->resample_channels != dec->channels ||
1026  ost->resample_sample_rate != dec->sample_rate;
1027 
1028  if ((ost->audio_resample && !ost->resample) || resample_changed) {
1029  if (resample_changed) {
1030  av_log(NULL, AV_LOG_INFO, "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d to rate:%d fmt:%s ch:%d\n",
1031  ist->file_index, ist->st->index,
1034  ost->resample_sample_fmt = dec->sample_fmt;
1035  ost->resample_channels = dec->channels;
1036  ost->resample_sample_rate = dec->sample_rate;
1037  if (ost->resample)
1038  audio_resample_close(ost->resample);
1039  }
1040  /* if audio_sync_method is >1 the resampler is needed for audio drift compensation */
1041  if (audio_sync_method <= 1 &&
1042  ost->resample_sample_fmt == enc->sample_fmt &&
1043  ost->resample_channels == enc->channels &&
1044  ost->resample_sample_rate == enc->sample_rate) {
1045  ost->resample = NULL;
1046  ost->audio_resample = 0;
1047  } else if (ost->audio_resample) {
1048  if (dec->sample_fmt != AV_SAMPLE_FMT_S16)
1049  av_log(NULL, AV_LOG_WARNING, "Using s16 intermediate sample format for resampling\n");
1050  ost->resample = av_audio_resample_init(enc->channels, dec->channels,
1051  enc->sample_rate, dec->sample_rate,
1052  enc->sample_fmt, dec->sample_fmt,
1053  16, 10, 0, 0.8);
1054  if (!ost->resample) {
1055  av_log(NULL, AV_LOG_FATAL, "Can not resample %d channels @ %d Hz to %d channels @ %d Hz\n",
1056  dec->channels, dec->sample_rate,
1057  enc->channels, enc->sample_rate);
1058  exit_program(1);
1059  }
1060  }
1061  }
1062 
1063 #define MAKE_SFMT_PAIR(a,b) ((a)+AV_SAMPLE_FMT_NB*(b))
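/* MAKE_SFMT_PAIR packs an (encoder, decoder) sample-format pair into a single
 * int, so ost->reformat_pair can cheaply record which conversion the current
 * reformat_ctx was built for and a new converter is only allocated when the
 * pair changes. */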
1064  if (!ost->audio_resample && dec->sample_fmt != enc->sample_fmt &&
1065  MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt) != ost->reformat_pair) {
1066  if (ost->reformat_ctx)
1067  av_audio_convert_free(ost->reformat_ctx);
1068  ost->reformat_ctx = av_audio_convert_alloc(enc->sample_fmt, 1,
1069  dec->sample_fmt, 1, NULL, 0);
1070  if (!ost->reformat_ctx) {
1071  av_log(NULL, AV_LOG_FATAL, "Cannot convert %s sample format to %s sample format\n",
1072  av_get_sample_fmt_name(dec->sample_fmt),
1073  av_get_sample_fmt_name(enc->sample_fmt));
1074  exit_program(1);
1075  }
1076  ost->reformat_pair = MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt);
1077  }
1078 
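 /* Drift handling below, illustrated with assumed numbers: with a 48 kHz
  * encoder, if the input timestamp says the output should be at sample 48000
  * but sync_opts plus the FIFO backlog only accounts for 47000 samples, delta
  * is +1000. A large gap (beyond audio_drift_threshold, 0.1 s = 4800 samples
  * by default, or at stream start) is patched directly: silence is inserted,
  * or samples are discarded for a negative delta. A smaller drift is handed to
  * av_resample_compensate() when -async is greater than 1, so the resampler
  * stretches or squeezes the audio gradually instead. */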
1079  if (audio_sync_method) {
1080  double delta = get_sync_ipts(ost) * enc->sample_rate - ost->sync_opts -
1081  av_fifo_size(ost->fifo) / (enc->channels * osize);
1082  int idelta = delta * dec->sample_rate / enc->sample_rate;
1083  int byte_delta = idelta * isize * dec->channels;
1084 
1085  // FIXME resample delay
1086  if (fabs(delta) > 50) {
1087  if (ist->is_start || fabs(delta) > audio_drift_threshold*enc->sample_rate) {
1088  if (byte_delta < 0) {
1089  byte_delta = FFMAX(byte_delta, -size);
1090  size += byte_delta;
1091  buf -= byte_delta;
1092  av_log(NULL, AV_LOG_VERBOSE, "discarding %d audio samples\n",
1093  -byte_delta / (isize * dec->channels));
1094  if (!size)
1095  return;
1096  ist->is_start = 0;
1097  } else {
1098  static uint8_t *input_tmp = NULL;
1099  input_tmp = av_realloc(input_tmp, byte_delta + size);
1100 
1101  if (byte_delta > allocated_for_size - size) {
1102  allocated_for_size = byte_delta + (int64_t)size;
1103  goto need_realloc;
1104  }
1105  ist->is_start = 0;
1106 
1107  generate_silence(input_tmp, dec->sample_fmt, byte_delta);
1108  memcpy(input_tmp + byte_delta, buf, size);
1109  buf = input_tmp;
1110  size += byte_delta;
1111  av_log(NULL, AV_LOG_VERBOSE, "adding %d audio samples of silence\n", idelta);
1112  }
1113  } else if (audio_sync_method > 1) {
1114  int comp = av_clip(delta, -audio_sync_method, audio_sync_method);
1115  av_assert0(ost->audio_resample);
1116  av_log(NULL, AV_LOG_VERBOSE, "compensating audio timestamp drift:%f compensation:%d in:%d\n",
1117  delta, comp, enc->sample_rate);
1118 // fprintf(stderr, "drift:%f len:%d opts:%"PRId64" ipts:%"PRId64" fifo:%d\n", delta, -1, ost->sync_opts, (int64_t)(get_sync_ipts(ost) * enc->sample_rate), av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2));
1119  av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate);
1120  }
1121  }
1122  } else
1123  ost->sync_opts = lrintf(get_sync_ipts(ost) * enc->sample_rate) -
1124  av_fifo_size(ost->fifo) / (enc->channels * osize); // FIXME wrong
1125 
1126  if (ost->audio_resample) {
1127  buftmp = audio_buf;
1128  size_out = audio_resample(ost->resample,
1129  (short *)buftmp, (short *)buf,
1130  size / (dec->channels * isize));
1131  size_out = size_out * enc->channels * osize;
1132  } else {
1133  buftmp = buf;
1134  size_out = size;
1135  }
1136 
1137  if (!ost->audio_resample && dec->sample_fmt != enc->sample_fmt) {
1138  const void *ibuf[6] = { buftmp };
1139  void *obuf[6] = { audio_buf };
1140  int istride[6] = { isize };
1141  int ostride[6] = { osize };
1142  int len = size_out / istride[0];
1143  if (av_audio_convert(ost->reformat_ctx, obuf, ostride, ibuf, istride, len) < 0) {
1144  printf("av_audio_convert() failed\n");
1145  if (exit_on_error)
1146  exit_program(1);
1147  return;
1148  }
1149  buftmp = audio_buf;
1150  size_out = len * osize;
1151  }
1152 
1153  /* now encode as many frames as possible */
1154  if (enc->frame_size > 1) {
1155  /* output resampled raw samples */
1156  if (av_fifo_realloc2(ost->fifo, av_fifo_size(ost->fifo) + size_out) < 0) {
1157  av_log(NULL, AV_LOG_FATAL, "av_fifo_realloc2() failed\n");
1158  exit_program(1);
1159  }
1160  av_fifo_generic_write(ost->fifo, buftmp, size_out, NULL);
1161 
1162  frame_bytes = enc->frame_size * osize * enc->channels;
1163 
1164  while (av_fifo_size(ost->fifo) >= frame_bytes) {
1165  av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL);
1166  encode_audio_frame(s, ost, audio_buf, frame_bytes);
1167  }
1168  } else {
1169  encode_audio_frame(s, ost, buftmp, size_out);
1170  }
1171 }
1172 
1173 static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
1174 {
1175  AVCodecContext *dec;
1176  AVPicture *picture2;
1177  AVPicture picture_tmp;
1178  uint8_t *buf = 0;
1179 
1180  dec = ist->st->codec;
1181 
1182  /* deinterlace : must be done before any resize */
1183  if (do_deinterlace) {
1184  int size;
1185 
1186  /* create temporary picture */
1187  size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
1188  buf = av_malloc(size);
1189  if (!buf)
1190  return;
1191 
1192  picture2 = &picture_tmp;
1193  avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
1194 
1195  if (avpicture_deinterlace(picture2, picture,
1196  dec->pix_fmt, dec->width, dec->height) < 0) {
1197  /* if error, do not deinterlace */
1198  av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
1199  av_free(buf);
1200  buf = NULL;
1201  picture2 = picture;
1202  }
1203  } else {
1204  picture2 = picture;
1205  }
1206 
1207  if (picture != picture2)
1208  *picture = *picture2;
1209  *bufp = buf;
1210 }
1211 
1212 static void do_subtitle_out(AVFormatContext *s,
1213  OutputStream *ost,
1214  InputStream *ist,
1215  AVSubtitle *sub,
1216  int64_t pts)
1217 {
1218  static uint8_t *subtitle_out = NULL;
1219  int subtitle_out_max_size = 1024 * 1024;
1220  int subtitle_out_size, nb, i;
1221  AVCodecContext *enc;
1222  AVPacket pkt;
1223 
1224  if (pts == AV_NOPTS_VALUE) {
1225  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1226  if (exit_on_error)
1227  exit_program(1);
1228  return;
1229  }
1230 
1231  enc = ost->st->codec;
1232 
1233  if (!subtitle_out) {
1234  subtitle_out = av_malloc(subtitle_out_max_size);
1235  }
1236 
1237  /* Note: DVB subtitle need one packet to draw them and one other
1238  packet to clear them */
1239  /* XXX: signal it in the codec context ? */
1240  if (enc->codec_id == CODEC_ID_DVB_SUBTITLE)
1241  nb = 2;
1242  else
1243  nb = 1;
1244 
1245  for (i = 0; i < nb; i++) {
1246  sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
1247  // start_display_time is required to be 0
1248  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1249  sub->end_display_time -= sub->start_display_time;
1250  sub->start_display_time = 0;
1251  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1252  subtitle_out_max_size, sub);
1253  if (subtitle_out_size < 0) {
1254  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1255  exit_program(1);
1256  }
1257 
1258  av_init_packet(&pkt);
1259  pkt.stream_index = ost->index;
1260  pkt.data = subtitle_out;
1261  pkt.size = subtitle_out_size;
1262  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
1263  if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
1264  /* XXX: the pts correction is handled here. Maybe handling
1265  it in the codec would be better */
1266  if (i == 0)
1267  pkt.pts += 90 * sub->start_display_time;
1268  else
1269  pkt.pts += 90 * sub->end_display_time;
1270  }
1271  write_frame(s, &pkt, ost);
1272  }
1273 }
1274 
1275 static int bit_buffer_size = 1024 * 256;
1276 static uint8_t *bit_buffer = NULL;
1277 
1278 #if !CONFIG_AVFILTER
1279 static void do_video_resample(OutputStream *ost,
1280  InputStream *ist,
1281  AVFrame *in_picture,
1282  AVFrame **out_picture)
1283 {
1284  int resample_changed = 0;
1285  *out_picture = in_picture;
1286 
1287  resample_changed = ost->resample_width != in_picture->width ||
1288  ost->resample_height != in_picture->height ||
1289  ost->resample_pix_fmt != in_picture->format;
1290 
1291  if (resample_changed) {
1292  av_log(NULL, AV_LOG_INFO,
1293  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
1294  ist->file_index, ist->st->index,
1295  ost->resample_width, ost->resample_height, av_get_pix_fmt_name(ost->resample_pix_fmt),
1296  in_picture->width, in_picture->height, av_get_pix_fmt_name(in_picture->format));
1297  if (!ost->video_resample)
1298  ost->video_resample = 1;
1299  }
1300 
1301  if (ost->video_resample) {
1302  *out_picture = &ost->pict_tmp;
1303  if (resample_changed) {
1304  /* initialize a new scaler context */
1305  sws_freeContext(ost->img_resample_ctx);
1306  ost->img_resample_ctx = sws_getContext(
1307  ist->st->codec->width,
1308  ist->st->codec->height,
1309  ist->st->codec->pix_fmt,
1310  ost->st->codec->width,
1311  ost->st->codec->height,
1312  ost->st->codec->pix_fmt,
1313  ost->sws_flags, NULL, NULL, NULL);
1314  if (ost->img_resample_ctx == NULL) {
1315  av_log(NULL, AV_LOG_FATAL, "Cannot get resampling context\n");
1316  exit_program(1);
1317  }
1318  }
1319  sws_scale(ost->img_resample_ctx, in_picture->data, in_picture->linesize,
1320  0, ost->resample_height, (*out_picture)->data, (*out_picture)->linesize);
1321  }
1322  if (resample_changed) {
1323  ost->resample_width = in_picture->width;
1324  ost->resample_height = in_picture->height;
1325  ost->resample_pix_fmt = in_picture->format;
1326  }
1327 }
1328 #endif
1329 
1330 
1331 static void do_video_out(AVFormatContext *s,
1332  OutputStream *ost,
1333  InputStream *ist,
1334  AVFrame *in_picture,
1335  int *frame_size, float quality)
1336 {
1337  int nb_frames, i, ret, format_video_sync;
1338  AVFrame *final_picture;
1339  AVCodecContext *enc;
1340  double sync_ipts;
1341 
1342  enc = ost->st->codec;
1343 
1344  sync_ipts = get_sync_ipts(ost) / av_q2d(enc->time_base);
1345 
1346  /* by default, we output a single frame */
1347  nb_frames = 1;
1348 
1349  *frame_size = 0;
1350 
1351  format_video_sync = video_sync_method;
1352  if (format_video_sync == VSYNC_AUTO)
1353  format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
1354  (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
1355 
1356  if (format_video_sync != VSYNC_PASSTHROUGH) {
1357  double vdelta = sync_ipts - ost->sync_opts;
1358  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1359  if (vdelta < -1.1)
1360  nb_frames = 0;
1361  else if (format_video_sync == VSYNC_VFR) {
1362  if (vdelta <= -0.6) {
1363  nb_frames = 0;
1364  } else if (vdelta > 0.6)
1365  ost->sync_opts = lrintf(sync_ipts);
1366  } else if (vdelta > 1.1)
1367  nb_frames = lrintf(vdelta);
1368 //fprintf(stderr, "vdelta:%f, ost->sync_opts:%"PRId64", ost->sync_ipts:%f nb_frames:%d\n", vdelta, ost->sync_opts, get_sync_ipts(ost), nb_frames);
1369  if (nb_frames == 0) {
1370  ++nb_frames_drop;
1371  av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
1372  } else if (nb_frames > 1) {
1373  nb_frames_dup += nb_frames - 1;
1374  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1375  }
1376  } else
1377  ost->sync_opts = lrintf(sync_ipts);
1378 
1379  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1380  if (nb_frames <= 0)
1381  return;
1382 
1383 #if !CONFIG_AVFILTER
1384  do_video_resample(ost, ist, in_picture, &final_picture);
1385 #else
1386  final_picture = in_picture;
1387 #endif
1388 
1389  /* duplicates frame if needed */
1390  for (i = 0; i < nb_frames; i++) {
1391  AVPacket pkt;
1392  av_init_packet(&pkt);
1393  pkt.stream_index = ost->index;
1394 
1395  if (s->oformat->flags & AVFMT_RAWPICTURE &&
1396  enc->codec->id == CODEC_ID_RAWVIDEO) {
1397  /* raw pictures are written as AVPicture structure to
1398  avoid any copies. We support temporarily the older
1399  method. */
1400  enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
1401  enc->coded_frame->top_field_first = in_picture->top_field_first;
1402  pkt.data = (uint8_t *)final_picture;
1403  pkt.size = sizeof(AVPicture);
1404  pkt.pts = av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
1405  pkt.flags |= AV_PKT_FLAG_KEY;
1406 
1407  write_frame(s, &pkt, ost);
1408  } else {
1409  AVFrame big_picture;
1410 
1411  big_picture = *final_picture;
1412  /* better than nothing: use input picture interlaced
1413  settings */
1414  big_picture.interlaced_frame = in_picture->interlaced_frame;
1415  if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
1416  if (ost->top_field_first == -1)
1417  big_picture.top_field_first = in_picture->top_field_first;
1418  else
1419  big_picture.top_field_first = !!ost->top_field_first;
1420  }
1421 
1422  /* handles same_quant here. This is not correct because it may
1423  not be a global option */
1424  big_picture.quality = quality;
1425  if (!enc->me_threshold)
1426  big_picture.pict_type = 0;
1427 // big_picture.pts = AV_NOPTS_VALUE;
1428  big_picture.pts = ost->sync_opts;
1429 // big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->time_base.num, enc->time_base.den);
1430 // av_log(NULL, AV_LOG_DEBUG, "%"PRId64" -> encoder\n", ost->sync_opts);
1431  if (ost->forced_kf_index < ost->forced_kf_count &&
1432  big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1433  big_picture.pict_type = AV_PICTURE_TYPE_I;
1434  ost->forced_kf_index++;
1435  }
1436  ret = avcodec_encode_video(enc,
1437  bit_buffer, bit_buffer_size,
1438  &big_picture);
1439  if (ret < 0) {
1440  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1441  exit_program(1);
1442  }
1443 
1444  if (ret > 0) {
1445  pkt.data = bit_buffer;
1446  pkt.size = ret;
1447  if (enc->coded_frame->pts != AV_NOPTS_VALUE)
1448  pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
1449 /*av_log(NULL, AV_LOG_DEBUG, "encoder -> %"PRId64"/%"PRId64"\n",
1450  pkt.pts != AV_NOPTS_VALUE ? av_rescale(pkt.pts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1,
1451  pkt.dts != AV_NOPTS_VALUE ? av_rescale(pkt.dts, enc->time_base.den, AV_TIME_BASE*(int64_t)enc->time_base.num) : -1);*/
1452 
1453  if (enc->coded_frame->key_frame)
1454  pkt.flags |= AV_PKT_FLAG_KEY;
1455  write_frame(s, &pkt, ost);
1456  *frame_size = ret;
1457  video_size += ret;
1458  // fprintf(stderr,"\nFrame: %3d size: %5d type: %d",
1459  // enc->frame_number-1, ret, enc->pict_type);
1460  /* if two pass, output log */
1461  if (ost->logfile && enc->stats_out) {
1462  fprintf(ost->logfile, "%s", enc->stats_out);
1463  }
1464  }
1465  }
1466  ost->sync_opts++;
1467  /*
1468  * For video, number of frames in == number of packets out.
1469  * But there may be reordering, so we can't throw away frames on encoder
1470  * flush, we need to limit them here, before they go into encoder.
1471  */
1472  ost->frame_number++;
1473  }
1474 }
1475 
1476 static double psnr(double d)
1477 {
1478  return -10.0 * log(d) / log(10.0);
1479 }
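/* psnr() expects the mean squared error already normalised to [0,1]; e.g. an
 * accumulated error of 2.6 over a 720x576 frame gives
 * d = 2.6 / (720*576*255*255) ~= 9.6e-11, i.e. roughly 100 dB. */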
1480 
1481 static void do_video_stats(AVFormatContext *os, OutputStream *ost,
1482  int frame_size)
1483 {
1484  AVCodecContext *enc;
1485  int frame_number;
1486  double ti1, bitrate, avg_bitrate;
1487 
1488  /* this is executed just the first time do_video_stats is called */
1489  if (!vstats_file) {
1490  vstats_file = fopen(vstats_filename, "w");
1491  if (!vstats_file) {
1492  perror("fopen");
1493  exit_program(1);
1494  }
1495  }
1496 
1497  enc = ost->st->codec;
1498  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1499  frame_number = ost->frame_number;
1500  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
1501  if (enc->flags&CODEC_FLAG_PSNR)
1502  fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1503 
1504  fprintf(vstats_file,"f_size= %6d ", frame_size);
1505  /* compute pts value */
1506  ti1 = ost->sync_opts * av_q2d(enc->time_base);
1507  if (ti1 < 0.01)
1508  ti1 = 0.01;
1509 
1510  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1511  avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
1512  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1513  (double)video_size / 1024, ti1, bitrate, avg_bitrate);
1515  }
1516 }
1517 
1518 static void print_report(OutputFile *output_files,
1519  OutputStream *ost_table, int nb_ostreams,
1520  int is_last_report, int64_t timer_start)
1521 {
1522  char buf[1024];
1523  OutputStream *ost;
1524  AVFormatContext *oc;
1525  int64_t total_size;
1526  AVCodecContext *enc;
1527  int frame_number, vid, i;
1528  double bitrate, ti1, pts;
1529  static int64_t last_time = -1;
1530  static int qp_histogram[52];
1531 
1532  if (!print_stats && !is_last_report)
1533  return;
1534 
1535  if (!is_last_report) {
1536  int64_t cur_time;
1537  /* display the report every 0.5 seconds */
1538  cur_time = av_gettime();
1539  if (last_time == -1) {
1540  last_time = cur_time;
1541  return;
1542  }
1543  if ((cur_time - last_time) < 500000)
1544  return;
1545  last_time = cur_time;
1546  }
1547 
1548 
1549  oc = output_files[0].ctx;
1550 
1551  total_size = avio_size(oc->pb);
1552  if (total_size < 0) // FIXME improve avio_size() so it works with non seekable output too
1553  total_size = avio_tell(oc->pb);
1554 
1555  buf[0] = '\0';
1556  ti1 = 1e10;
1557  vid = 0;
1558  for (i = 0; i < nb_ostreams; i++) {
1559  float q = -1;
1560  ost = &ost_table[i];
1561  enc = ost->st->codec;
1562  if (!ost->stream_copy && enc->coded_frame)
1563  q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
1564  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1565  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1566  }
1567  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1568  float t = (av_gettime() - timer_start) / 1000000.0;
1569 
1570  frame_number = ost->frame_number;
1571  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
1572  frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
1573  if (is_last_report)
1574  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1575  if (qp_hist) {
1576  int j;
1577  int qp = lrintf(q);
1578  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1579  qp_histogram[qp]++;
1580  for (j = 0; j < 32; j++)
1581  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j] + 1) / log(2)));
1582  }
1583  if (enc->flags&CODEC_FLAG_PSNR) {
1584  int j;
1585  double error, error_sum = 0;
1586  double scale, scale_sum = 0;
1587  char type[3] = { 'Y','U','V' };
1588  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1589  for (j = 0; j < 3; j++) {
1590  if (is_last_report) {
1591  error = enc->error[j];
1592  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1593  } else {
1594  error = enc->coded_frame->error[j];
1595  scale = enc->width * enc->height * 255.0 * 255.0;
1596  }
1597  if (j)
1598  scale /= 4;
1599  error_sum += error;
1600  scale_sum += scale;
1601  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
1602  }
1603  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1604  }
1605  vid = 1;
1606  }
1607  /* compute min output value */
1608  pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
1609  if ((pts < ti1) && (pts > 0))
1610  ti1 = pts;
1611  }
1612  if (ti1 < 0.01)
1613  ti1 = 0.01;
1614 
1615  bitrate = (double)(total_size * 8) / ti1 / 1000.0;
1616 
1617  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1618  "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
1619  (double)total_size / 1024, ti1, bitrate);
1620 
1621  if (nb_frames_dup || nb_frames_drop)
1622  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1623  nb_frames_dup, nb_frames_drop);
1624 
1625  av_log(NULL, AV_LOG_INFO, "%s \r", buf);
1626 
1627  fflush(stderr);
1628 
1629  if (is_last_report) {
1630  int64_t raw= audio_size + video_size + extra_size;
1631  av_log(NULL, AV_LOG_INFO, "\n");
1632  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
1633  video_size / 1024.0,
1634  audio_size / 1024.0,
1635  extra_size / 1024.0,
1636  100.0 * (total_size - raw) / raw
1637  );
1638  }
1639 }
1640 
1641 static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
1642 {
1643  int i, ret;
1644 
1645  for (i = 0; i < nb_ostreams; i++) {
1646  OutputStream *ost = &ost_table[i];
1647  AVCodecContext *enc = ost->st->codec;
1648  AVFormatContext *os = output_files[ost->file_index].ctx;
1649  int stop_encoding = 0;
1650 
1651  if (!ost->encoding_needed)
1652  continue;
1653 
1654  if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1655  continue;
1657  continue;
1658 
1659  for (;;) {
1660  AVPacket pkt;
1661  int fifo_bytes;
1662  av_init_packet(&pkt);
1663  pkt.data = NULL;
1664  pkt.size = 0;
1665 
1666  switch (ost->st->codec->codec_type) {
1667  case AVMEDIA_TYPE_AUDIO:
1668  fifo_bytes = av_fifo_size(ost->fifo);
1669  if (fifo_bytes > 0) {
1670  /* encode any samples remaining in fifo */
1671  int frame_bytes = fifo_bytes;
1672 
1673  av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL);
1674 
1675  /* pad last frame with silence if needed */
1676  if (!(enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME)) {
1677  frame_bytes = enc->frame_size * enc->channels *
1678  av_get_bytes_per_sample(enc->sample_fmt);
1679  if (allocated_audio_buf_size < frame_bytes)
1680  exit_program(1);
1681  generate_silence(audio_buf+fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes);
1682  }
1683  encode_audio_frame(os, ost, audio_buf, frame_bytes);
1684  } else {
1685  /* flush encoder with NULL frames until it is done
1686  returning packets */
1687  if (encode_audio_frame(os, ost, NULL, 0) == 0) {
1688  stop_encoding = 1;
1689  break;
1690  }
1691  }
1692  break;
1693  case AVMEDIA_TYPE_VIDEO:
1694  ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
1695  if (ret < 0) {
1696  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1697  exit_program(1);
1698  }
1699  video_size += ret;
1700  if (enc->coded_frame && enc->coded_frame->key_frame)
1701  pkt.flags |= AV_PKT_FLAG_KEY;
1702  if (ost->logfile && enc->stats_out) {
1703  fprintf(ost->logfile, "%s", enc->stats_out);
1704  }
1705  if (ret <= 0) {
1706  stop_encoding = 1;
1707  break;
1708  }
1709  pkt.stream_index = ost->index;
1710  pkt.data = bit_buffer;
1711  pkt.size = ret;
1712  if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
1713  pkt.pts = av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
1714  write_frame(os, &pkt, ost);
1715  break;
1716  default:
1717  stop_encoding = 1;
1718  }
1719  if (stop_encoding)
1720  break;
1721  }
1722  }
1723 }
1724 
1725 /*
1726  * Check whether a packet from ist should be written into ost at this time
1727  */
1728 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1729 {
1730  OutputFile *of = &output_files[ost->file_index];
1731  int ist_index = ist - input_streams;
1732 
1733  if (ost->source_index != ist_index)
1734  return 0;
1735 
1736  if (of->start_time && ist->pts < of->start_time)
1737  return 0;
1738 
1739  if (of->recording_time != INT64_MAX &&
1740  av_compare_ts(ist->pts, AV_TIME_BASE_Q, of->recording_time + of->start_time,
1741  (AVRational){ 1, 1000000 }) >= 0) {
1742  ost->is_past_recording_time = 1;
1743  return 0;
1744  }
1745 
1746  return 1;
1747 }
1748 
1749 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1750 {
1751  OutputFile *of = &output_files[ost->file_index];
1752  int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
1753  AVPacket opkt;
1754 
1755  av_init_packet(&opkt);
1756 
1757  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1758  !ost->copy_initial_nonkeyframes)
1759  return;
1760 
1761  /* force the input stream PTS */
1762  if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1763  audio_size += pkt->size;
1764  else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1765  video_size += pkt->size;
1766  ost->sync_opts++;
1767  }
1768 
1769  opkt.stream_index = ost->index;
1770  if (pkt->pts != AV_NOPTS_VALUE)
1771  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1772  else
1773  opkt.pts = AV_NOPTS_VALUE;
1774 
1775  if (pkt->dts == AV_NOPTS_VALUE)
1776  opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base);
1777  else
1778  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1779  opkt.dts -= ost_tb_start_time;
1780 
1781  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1782  opkt.flags = pkt->flags;
1783 
1784  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1785  if ( ost->st->codec->codec_id != CODEC_ID_H264
1786  && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
1787  && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
1788  ) {
1789  if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
1790  opkt.destruct = av_destruct_packet;
1791  } else {
1792  opkt.data = pkt->data;
1793  opkt.size = pkt->size;
1794  }
1795 
1796  write_frame(of->ctx, &opkt, ost);
1797  ost->st->codec->frame_number++;
1798  av_free_packet(&opkt);
1799 }
1800 
1801 static void rate_emu_sleep(InputStream *ist)
1802 {
1803  if (input_files[ist->file_index].rate_emu) {
1804  int64_t pts = av_rescale(ist->pts, 1000000, AV_TIME_BASE);
1805  int64_t now = av_gettime() - ist->start;
1806  if (pts > now)
1807  usleep(pts - now);
1808  }
1809 }
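/* rate_emu_sleep() implements rate emulation (the -re input option): the
 * stream's current pts is rescaled to microseconds and compared with the
 * wall-clock time since reading started; if demuxing is ahead of real time,
 * the thread sleeps for the difference. */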
1810 
1811 static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1812 {
1813  AVFrame *decoded_frame;
1814  AVCodecContext *avctx = ist->st->codec;
1815  int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
1816  int i, ret;
1817 
1818  if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
1819  return AVERROR(ENOMEM);
1820  else
1821  avcodec_get_frame_defaults(ist->decoded_frame);
1822  decoded_frame = ist->decoded_frame;
1823 
1824  ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1825  if (ret < 0) {
1826  return ret;
1827  }
1828 
1829  if (!*got_output) {
1830  /* no audio frame */
1831  return ret;
1832  }
1833 
1834  /* if the decoder provides a pts, use it instead of the last packet pts.
1835  the decoder could be delaying output by a packet or more. */
1836  if (decoded_frame->pts != AV_NOPTS_VALUE)
1837  ist->next_pts = decoded_frame->pts;
1838 
1839  /* increment next_pts to use for the case where the input stream does not
1840  have timestamps or there are multiple frames in the packet */
1841  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1842  avctx->sample_rate;
1843 
1844  // preprocess audio (volume)
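 /* audio_volume is fixed-point gain with 256 meaning unity, so e.g. -vol 512
  * doubles each sample: v = (sample * 512 + 128) >> 8. The switch below simply
  * repeats that scaling for every supported sample format. */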
1845  if (audio_volume != 256) {
1846  int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
1847  void *samples = decoded_frame->data[0];
1848  switch (avctx->sample_fmt) {
1849  case AV_SAMPLE_FMT_U8:
1850  {
1851  uint8_t *volp = samples;
1852  for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
1853  int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
1854  *volp++ = av_clip_uint8(v);
1855  }
1856  break;
1857  }
1858  case AV_SAMPLE_FMT_S16:
1859  {
1860  int16_t *volp = samples;
1861  for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
1862  int v = ((*volp) * audio_volume + 128) >> 8;
1863  *volp++ = av_clip_int16(v);
1864  }
1865  break;
1866  }
1867  case AV_SAMPLE_FMT_S32:
1868  {
1869  int32_t *volp = samples;
1870  for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
1871  int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
1872  *volp++ = av_clipl_int32(v);
1873  }
1874  break;
1875  }
1876  case AV_SAMPLE_FMT_FLT:
1877  {
1878  float *volp = samples;
1879  float scale = audio_volume / 256.f;
1880  for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
1881  *volp++ *= scale;
1882  }
1883  break;
1884  }
1885  case AV_SAMPLE_FMT_DBL:
1886  {
1887  double *volp = samples;
1888  double scale = audio_volume / 256.;
1889  for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
1890  *volp++ *= scale;
1891  }
1892  break;
1893  }
1894  default:
1895  av_log(NULL, AV_LOG_FATAL,
1896  "Audio volume adjustment on sample format %s is not supported.\n",
1897  av_get_sample_fmt_name(ist->st->codec->sample_fmt));
1898  exit_program(1);
1899  }
1900  }
1901 
1902  rate_emu_sleep(ist);
1903 
1904  for (i = 0; i < nb_output_streams; i++) {
1905  OutputStream *ost = &output_streams[i];
1906 
1907  if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
1908  continue;
1909  do_audio_out(output_files[ost->file_index].ctx, ost, ist, decoded_frame);
1910  }
1911 
1912  return ret;
1913 }
1914 
1915 static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *pkt_pts)
1916 {
1917  AVFrame *decoded_frame, *filtered_frame = NULL;
1918  void *buffer_to_free = NULL;
1919  int i, ret = 0;
1920  float quality;
1921 #if CONFIG_AVFILTER
1922  int frame_available = 1;
1923 #endif
1924 
1925  if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
1926  return AVERROR(ENOMEM);
1927  else
1928  avcodec_get_frame_defaults(ist->decoded_frame);
1929  decoded_frame = ist->decoded_frame;
1930  pkt->pts = *pkt_pts;
1931  pkt->dts = ist->pts;
1932  *pkt_pts = AV_NOPTS_VALUE;
1933 
1934  ret = avcodec_decode_video2(ist->st->codec,
1935  decoded_frame, got_output, pkt);
1936  if (ret < 0)
1937  return ret;
1938 
1939  quality = same_quant ? decoded_frame->quality : 0;
1940  if (!*got_output) {
1941  /* no picture yet */
1942  return ret;
1943  }
1944  ist->next_pts = ist->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
1945  decoded_frame->pkt_dts);
1946  if (pkt->duration)
1947  ist->next_pts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
1948  else if (ist->st->codec->time_base.num != 0) {
1949  int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
1950  ist->st->codec->ticks_per_frame;
1951  ist->next_pts += ((int64_t)AV_TIME_BASE *
1952  ist->st->codec->time_base.num * ticks) /
1953  ist->st->codec->time_base.den;
1954  }
1955  pkt->size = 0;
1956  pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
1957 
1958  rate_emu_sleep(ist);
1959 
1960  for (i = 0; i < nb_output_streams; i++) {
1961  OutputStream *ost = &output_streams[i];
1962  int frame_size, resample_changed;
1963 
1964  if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
1965  continue;
1966 
1967 #if CONFIG_AVFILTER
1968  resample_changed = ost->resample_width != decoded_frame->width ||
1969  ost->resample_height != decoded_frame->height ||
1970  ost->resample_pix_fmt != decoded_frame->format;
1971  if (resample_changed) {
1972  av_log(NULL, AV_LOG_INFO,
1973  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
1974  ist->file_index, ist->st->index,
1975  ost->resample_width, ost->resample_height, av_get_pix_fmt_name(ost->resample_pix_fmt),
1976  decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
1977 
1978  avfilter_graph_free(&ost->graph);
1979  if (configure_video_filters(ist, ost)) {
1980  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1981  exit_program(1);
1982  }
1983 
1984  ost->resample_width = decoded_frame->width;
1985  ost->resample_height = decoded_frame->height;
1986  ost->resample_pix_fmt = decoded_frame->format;
1987  }
1988 
1989  if (ist->st->sample_aspect_ratio.num)
1990  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
1991  if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
1992  FrameBuffer *buf = decoded_frame->opaque;
1993  AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1994  decoded_frame->data, decoded_frame->linesize,
1995  AV_PERM_READ | AV_PERM_PRESERVE,
1996  ist->st->codec->width, ist->st->codec->height,
1997  ist->st->codec->pix_fmt);
1998 
1999  avfilter_copy_frame_props(fb, decoded_frame);
2000  fb->pts = ist->pts;
2001  fb->buf->priv = buf;
2002  fb->buf->free = filter_release_buffer;
2003 
2004  buf->refcount++;
2005  av_buffersrc_buffer(ost->input_video_filter, fb);
2006  } else
2007  av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame,
2008  ist->pts, decoded_frame->sample_aspect_ratio);
2009 
2010  if (!ist->filtered_frame && !(ist->filtered_frame = avcodec_alloc_frame())) {
2011  av_free(buffer_to_free);
2012  return AVERROR(ENOMEM);
2013  } else
2014  avcodec_get_frame_defaults(ist->filtered_frame);
2015  filtered_frame = ist->filtered_frame;
2016 
2017  frame_available = avfilter_poll_frame(ost->output_video_filter->inputs[0]);
2018  while (frame_available) {
2019  AVRational ist_pts_tb;
2020  if (ost->output_video_filter)
2021  get_filtered_video_frame(ost->output_video_filter, filtered_frame, &ost->picref, &ist_pts_tb);
2022  if (ost->picref)
2023  ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
2024  if (ost->picref->video && !ost->frame_aspect_ratio)
2025  ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect;
2026 #else
2027  filtered_frame = decoded_frame;
2028 #endif
2029 
2030  do_video_out(output_files[ost->file_index].ctx, ost, ist, filtered_frame, &frame_size,
2031  same_quant ? quality : ost->st->codec->global_quality);
2032  if (vstats_filename && frame_size)
2033  do_video_stats(output_files[ost->file_index].ctx, ost, frame_size);
2034 #if CONFIG_AVFILTER
2035  frame_available = ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
2036  if (ost->picref)
2037  avfilter_unref_buffer(ost->picref);
2038  }
2039 #endif
2040  }
2041 
2042  av_free(buffer_to_free);
2043  return ret;
2044 }
2045 
2046 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2047 {
2048  AVSubtitle subtitle;
2049  int i, ret = avcodec_decode_subtitle2(ist->st->codec,
2050  &subtitle, got_output, pkt);
2051  if (ret < 0)
2052  return ret;
2053  if (!*got_output)
2054  return ret;
2055 
2056  rate_emu_sleep(ist);
2057 
2058  for (i = 0; i < nb_output_streams; i++) {
2059  OutputStream *ost = &output_streams[i];
2060 
2061  if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
2062  continue;
2063 
2064  do_subtitle_out(output_files[ost->file_index].ctx, ost, ist, &subtitle, pkt->pts);
2065  }
2066 
2067  avsubtitle_free(&subtitle);
2068  return ret;
2069 }
2070 
2071 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2072 static int output_packet(InputStream *ist,
2073  OutputStream *ost_table, int nb_ostreams,
2074  const AVPacket *pkt)
2075 {
2076  int i;
2077  int got_output;
2078  int64_t pkt_pts = AV_NOPTS_VALUE;
2079  AVPacket avpkt;
2080 
2081  if (ist->next_pts == AV_NOPTS_VALUE)
2082  ist->next_pts = ist->pts;
2083 
2084  if (pkt == NULL) {
2085  /* EOF handling */
2086  av_init_packet(&avpkt);
2087  avpkt.data = NULL;
2088  avpkt.size = 0;
2089  goto handle_eof;
2090  } else {
2091  avpkt = *pkt;
2092  }
2093 
2094  if (pkt->dts != AV_NOPTS_VALUE)
2095  ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2096  if (pkt->pts != AV_NOPTS_VALUE)
2097  pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2098 
2099  // while we have more to decode or while the decoder did output something on EOF
2100  while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2101  int ret = 0;
2102  handle_eof:
2103 
2104  ist->pts = ist->next_pts;
2105 
2106  if (avpkt.size && avpkt.size != pkt->size) {
2107  av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2108  "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2109  ist->showed_multi_packet_warning = 1;
2110  }
2111 
2112  switch (ist->st->codec->codec_type) {
2113  case AVMEDIA_TYPE_AUDIO:
2114  ret = transcode_audio (ist, &avpkt, &got_output);
2115  break;
2116  case AVMEDIA_TYPE_VIDEO:
2117  ret = transcode_video (ist, &avpkt, &got_output, &pkt_pts);
2118  break;
2119  case AVMEDIA_TYPE_SUBTITLE:
2120  ret = transcode_subtitles(ist, &avpkt, &got_output);
2121  break;
2122  default:
2123  return -1;
2124  }
2125 
2126  if (ret < 0)
2127  return ret;
2128  // touch data and size only if not EOF
2129  if (pkt) {
2130  avpkt.data += ret;
2131  avpkt.size -= ret;
2132  }
2133  if (!got_output) {
2134  continue;
2135  }
2136  }
2137 
2138  /* handle stream copy */
2139  if (!ist->decoding_needed) {
2140  rate_emu_sleep(ist);
2141  ist->pts = ist->next_pts;
2142  switch (ist->st->codec->codec_type) {
2143  case AVMEDIA_TYPE_AUDIO:
2144  ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
2145  ist->st->codec->sample_rate;
2146  break;
2147  case AVMEDIA_TYPE_VIDEO:
2148  if (ist->st->codec->time_base.num != 0) {
2149  int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
2150  ist->next_pts += ((int64_t)AV_TIME_BASE *
2151  ist->st->codec->time_base.num * ticks) /
2152  ist->st->codec->time_base.den;
2153  }
2154  break;
2155  }
2156  }
2157  for (i = 0; pkt && i < nb_ostreams; i++) {
2158  OutputStream *ost = &ost_table[i];
2159 
2160  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2161  continue;
2162 
2163  do_streamcopy(ist, ost, pkt);
2164  }
2165 
2166  return 0;
2167 }
2168 
2169 static void print_sdp(OutputFile *output_files, int n)
2170 {
2171  char sdp[2048];
2172  int i;
2173  AVFormatContext **avc = av_malloc(sizeof(*avc) * n);
2174 
2175  if (!avc)
2176  exit_program(1);
2177  for (i = 0; i < n; i++)
2178  avc[i] = output_files[i].ctx;
2179 
2180  av_sdp_create(avc, n, sdp, sizeof(sdp));
2181  printf("SDP:\n%s\n", sdp);
2182  fflush(stdout);
2183  av_freep(&avc);
2184 }
2185 
2186 static int init_input_stream(int ist_index, OutputStream *output_streams, int nb_output_streams,
2187  char *error, int error_len)
2188 {
2189  int i;
2190  InputStream *ist = &input_streams[ist_index];
2191  if (ist->decoding_needed) {
2192  AVCodec *codec = ist->dec;
2193  if (!codec) {
2194  snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
2195  ist->st->codec->codec_id, ist->file_index, ist->st->index);
2196  return AVERROR(EINVAL);
2197  }
2198 
2199  /* update requested sample format for the decoder based on the
2200  corresponding encoder sample format */
2201  for (i = 0; i < nb_output_streams; i++) {
2202  OutputStream *ost = &output_streams[i];
2203  if (ost->source_index == ist_index) {
2204  update_sample_fmt(ist->st->codec, codec, ost->st->codec);
2205  break;
2206  }
2207  }
2208 
2209  if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
2210  ist->st->codec->get_buffer = codec_get_buffer;
2211  ist->st->codec->release_buffer = codec_release_buffer;
2212  ist->st->codec->opaque = ist;
2213  }
2214 
2215  if (!av_dict_get(ist->opts, "threads", NULL, 0))
2216  av_dict_set(&ist->opts, "threads", "auto", 0);
2217  if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
2218  snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
2219  ist->file_index, ist->st->index);
2220  return AVERROR(EINVAL);
2221  }
2222  assert_codec_experimental(ist->st->codec, 0);
2223  assert_avoptions(ist->opts);
2224  }
2225 
2226  ist->pts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2227  ist->next_pts = AV_NOPTS_VALUE;
2228  init_pts_correction(&ist->pts_ctx);
2229  ist->is_start = 1;
2230 
2231  return 0;
2232 }
2233 
2234 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2235  AVCodecContext *avctx)
2236 {
2237  char *p;
2238  int n = 1, i;
2239  int64_t t;
2240 
2241  for (p = kf; *p; p++)
2242  if (*p == ',')
2243  n++;
2244  ost->forced_kf_count = n;
2245  ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
2246  if (!ost->forced_kf_pts) {
2247  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2248  exit_program(1);
2249  }
2250 
2251  p = kf;
2252  for (i = 0; i < n; i++) {
2253  char *next = strchr(p, ',');
2254 
2255  if (next)
2256  *next++ = 0;
2257 
2258  t = parse_time_or_die("force_key_frames", p, 1);
2259  ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2260 
2261  p = next;
2262  }
2263 }
2264 
2265 static int transcode_init(OutputFile *output_files,
2266  int nb_output_files,
2267  InputFile *input_files,
2268  int nb_input_files)
2269 {
2270  int ret = 0, i, j, k;
2271  AVFormatContext *oc;
2272  AVCodecContext *codec, *icodec;
2273  OutputStream *ost;
2274  InputStream *ist;
2275  char error[1024];
2276  int want_sdp = 1;
2277 
2278  /* init framerate emulation */
2279  for (i = 0; i < nb_input_files; i++) {
2280  InputFile *ifile = &input_files[i];
2281  if (ifile->rate_emu)
2282  for (j = 0; j < ifile->nb_streams; j++)
2283  input_streams[j + ifile->ist_index].start = av_gettime();
2284  }
2285 
2286  /* output stream init */
2287  for (i = 0; i < nb_output_files; i++) {
2288  oc = output_files[i].ctx;
2289  if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
2290  av_dump_format(oc, i, oc->filename, 1);
2291  av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
2292  return AVERROR(EINVAL);
2293  }
2294  }
2295 
2296  /* for each output stream, we compute the right encoding parameters */
2297  for (i = 0; i < nb_output_streams; i++) {
2298  ost = &output_streams[i];
2299  oc = output_files[ost->file_index].ctx;
2300  ist = &input_streams[ost->source_index];
2301 
2302  if (ost->attachment_filename)
2303  continue;
2304 
2305  codec = ost->st->codec;
2306  icodec = ist->st->codec;
2307 
2308  ost->st->disposition = ist->st->disposition;
2309  codec->bits_per_raw_sample = icodec->bits_per_raw_sample;
2310  codec->chroma_sample_location = icodec->chroma_sample_location;
2311 
2312  if (ost->stream_copy) {
2313  uint64_t extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
2314 
2315  if (extra_size > INT_MAX) {
2316  return AVERROR(EINVAL);
2317  }
2318 
2319  /* if stream_copy is selected, no need to decode or encode */
2320  codec->codec_id = icodec->codec_id;
2321  codec->codec_type = icodec->codec_type;
2322 
2323  if (!codec->codec_tag) {
2324  if (!oc->oformat->codec_tag ||
2325  av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
2326  av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
2327  codec->codec_tag = icodec->codec_tag;
2328  }
2329 
2330  codec->bit_rate = icodec->bit_rate;
2331  codec->rc_max_rate = icodec->rc_max_rate;
2332  codec->rc_buffer_size = icodec->rc_buffer_size;
2333  codec->field_order = icodec->field_order;
2334  codec->extradata = av_mallocz(extra_size);
2335  if (!codec->extradata) {
2336  return AVERROR(ENOMEM);
2337  }
2338  memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
2339  codec->extradata_size = icodec->extradata_size;
2340  if (!copy_tb) {
2341  codec->time_base = icodec->time_base;
2342  codec->time_base.num *= icodec->ticks_per_frame;
2343  av_reduce(&codec->time_base.num, &codec->time_base.den,
2344  codec->time_base.num, codec->time_base.den, INT_MAX);
2345  } else
2346  codec->time_base = ist->st->time_base;
2347 
2348  switch (codec->codec_type) {
2349  case AVMEDIA_TYPE_AUDIO:
2350  if (audio_volume != 256) {
2351  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2352  exit_program(1);
2353  }
2354  codec->channel_layout = icodec->channel_layout;
2355  codec->sample_rate = icodec->sample_rate;
2356  codec->channels = icodec->channels;
2357  codec->frame_size = icodec->frame_size;
2358  codec->audio_service_type = icodec->audio_service_type;
2359  codec->block_align = icodec->block_align;
2360  break;
2361  case AVMEDIA_TYPE_VIDEO:
2362  codec->pix_fmt = icodec->pix_fmt;
2363  codec->width = icodec->width;
2364  codec->height = icodec->height;
2365  codec->has_b_frames = icodec->has_b_frames;
2366  if (!codec->sample_aspect_ratio.num) {
2367  codec->sample_aspect_ratio =
2368  ost->st->sample_aspect_ratio =
2369  ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
2370  ist->st->codec->sample_aspect_ratio.num ?
2371  ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
2372  }
2373  break;
2374  case AVMEDIA_TYPE_SUBTITLE:
2375  codec->width = icodec->width;
2376  codec->height = icodec->height;
2377  break;
2378  case AVMEDIA_TYPE_DATA:
2379  case AVMEDIA_TYPE_ATTACHMENT:
2380  break;
2381  default:
2382  abort();
2383  }
2384  } else {
2385  if (!ost->enc)
2386  ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
2387 
2388  ist->decoding_needed = 1;
2389  ost->encoding_needed = 1;
2390 
2391  switch (codec->codec_type) {
2392  case AVMEDIA_TYPE_AUDIO:
2393  ost->fifo = av_fifo_alloc(1024);
2394  if (!ost->fifo) {
2395  return AVERROR(ENOMEM);
2396  }
2397  ost->reformat_pair = MAKE_SFMT_PAIR(AV_SAMPLE_FMT_NONE, AV_SAMPLE_FMT_NONE);
2398 
2399  if (!codec->sample_rate)
2400  codec->sample_rate = icodec->sample_rate;
2401  choose_sample_rate(ost->st, ost->enc);
2402  codec->time_base = (AVRational){ 1, codec->sample_rate };
2403 
2404  if (codec->sample_fmt == AV_SAMPLE_FMT_NONE)
2405  codec->sample_fmt = icodec->sample_fmt;
2406  choose_sample_fmt(ost->st, ost->enc);
2407 
2408  if (!codec->channels)
2409  codec->channels = icodec->channels;
2410  codec->channel_layout = icodec->channel_layout;
2411  if (av_get_channel_layout_nb_channels(codec->channel_layout) != codec->channels)
2412  codec->channel_layout = 0;
2413 
2414  ost->audio_resample = codec-> sample_rate != icodec->sample_rate || audio_sync_method > 1;
2415  icodec->request_channels = codec-> channels;
2416  ost->resample_sample_fmt = icodec->sample_fmt;
2417  ost->resample_sample_rate = icodec->sample_rate;
2418  ost->resample_channels = icodec->channels;
2419  break;
2420  case AVMEDIA_TYPE_VIDEO:
2421  if (codec->pix_fmt == PIX_FMT_NONE)
2422  codec->pix_fmt = icodec->pix_fmt;
2423  choose_pixel_fmt(ost->st, ost->enc);
2424 
2425  if (ost->st->codec->pix_fmt == PIX_FMT_NONE) {
2426  av_log(NULL, AV_LOG_FATAL, "Video pixel format is unknown, stream cannot be encoded\n");
2427  exit_program(1);
2428  }
2429 
2430  if (!codec->width || !codec->height) {
2431  codec->width = icodec->width;
2432  codec->height = icodec->height;
2433  }
2434 
2435  ost->video_resample = codec->width != icodec->width ||
2436  codec->height != icodec->height ||
2437  codec->pix_fmt != icodec->pix_fmt;
2438  if (ost->video_resample) {
2439 #if !CONFIG_AVFILTER
2440  avcodec_get_frame_defaults(&ost->pict_tmp);
2441  if (avpicture_alloc((AVPicture*)&ost->pict_tmp, codec->pix_fmt,
2442  codec->width, codec->height)) {
2443  av_log(NULL, AV_LOG_FATAL, "Cannot allocate temp picture, check pix fmt\n");
2444  exit_program(1);
2445  }
2446  ost->img_resample_ctx = sws_getContext(
2447  icodec->width,
2448  icodec->height,
2449  icodec->pix_fmt,
2450  codec->width,
2451  codec->height,
2452  codec->pix_fmt,
2453  ost->sws_flags, NULL, NULL, NULL);
2454  if (ost->img_resample_ctx == NULL) {
2455  av_log(NULL, AV_LOG_FATAL, "Cannot get resampling context\n");
2456  exit_program(1);
2457  }
2458 #endif
2459  codec->bits_per_raw_sample = 0;
2460  }
2461 
2462  ost->resample_height = icodec->height;
2463  ost->resample_width = icodec->width;
2464  ost->resample_pix_fmt = icodec->pix_fmt;
2465 
2466  if (!ost->frame_rate.num)
2467  ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational) { 25, 1 };
2468  if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
2469  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
2470  ost->frame_rate = ost->enc->supported_framerates[idx];
2471  }
2472  codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};
2473 
2474 #if CONFIG_AVFILTER
2475  if (configure_video_filters(ist, ost)) {
2476  av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
2477  exit(1);
2478  }
2479 #endif
2480  if (ost->forced_keyframes)
2481  parse_forced_key_frames(ost->forced_keyframes, ost,
2482  ost->st->codec);
2483  break;
2484  case AVMEDIA_TYPE_SUBTITLE:
2485  break;
2486  default:
2487  abort();
2488  break;
2489  }
2490  /* two pass mode */
2491  if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
2492  char logfilename[1024];
2493  FILE *f;
2494 
2495  snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
2497  i);
2498  if (!strcmp(ost->enc->name, "libx264")) {
2499  av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
2500  } else {
2501  if (codec->flags & CODEC_FLAG_PASS1) {
2502  f = fopen(logfilename, "wb");
2503  if (!f) {
2504  av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
2505  logfilename, strerror(errno));
2506  exit_program(1);
2507  }
2508  ost->logfile = f;
2509  } else {
2510  char *logbuffer;
2511  size_t logbuffer_size;
2512  if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
2513  av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
2514  logfilename);
2515  exit_program(1);
2516  }
2517  codec->stats_in = logbuffer;
2518  }
2519  }
2520  }
2521  }
2522  if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2523  int size = codec->width * codec->height;
2524  bit_buffer_size = FFMAX(bit_buffer_size, 6 * size + 200);
2525  }
2526  }
2527 
2528  if (!bit_buffer)
2529  bit_buffer = av_malloc(bit_buffer_size);
2530  if (!bit_buffer) {
2531  av_log(NULL, AV_LOG_ERROR, "Cannot allocate %d bytes output buffer\n",
2532  bit_buffer_size);
2533  return AVERROR(ENOMEM);
2534  }
2535 
2536  /* open each encoder */
2537  for (i = 0; i < nb_output_streams; i++) {
2538  ost = &output_streams[i];
2539  if (ost->encoding_needed) {
2540  AVCodec *codec = ost->enc;
2541  AVCodecContext *dec = input_streams[ost->source_index].st->codec;
2542  if (!codec) {
2543  snprintf(error, sizeof(error), "Encoder (codec id %d) not found for output stream #%d:%d",
2544  ost->st->codec->codec_id, ost->file_index, ost->index);
2545  ret = AVERROR(EINVAL);
2546  goto dump_format;
2547  }
2548  if (dec->subtitle_header) {
2549  ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
2550  if (!ost->st->codec->subtitle_header) {
2551  ret = AVERROR(ENOMEM);
2552  goto dump_format;
2553  }
2554  memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2555  ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
2556  }
2557  if (!av_dict_get(ost->opts, "threads", NULL, 0))
2558  av_dict_set(&ost->opts, "threads", "auto", 0);
2559  if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
2560  snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
2561  ost->file_index, ost->index);
2562  ret = AVERROR(EINVAL);
2563  goto dump_format;
2564  }
2565  assert_codec_experimental(ost->st->codec, 1);
2566  assert_avoptions(ost->opts);
2567  if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
2568  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2569  "It takes bits/s as argument, not kbits/s\n");
2570  extra_size += ost->st->codec->extradata_size;
2571 
2572  if (ost->st->codec->me_threshold)
2573  input_streams[ost->source_index].st->codec->debug |= FF_DEBUG_MV;
2574  }
2575  }
2576 
2577  /* init input streams */
2578  for (i = 0; i < nb_input_streams; i++)
2579  if ((ret = init_input_stream(i, output_streams, nb_output_streams, error, sizeof(error))) < 0)
2580  goto dump_format;
2581 
2582  /* discard unused programs */
2583  for (i = 0; i < nb_input_files; i++) {
2584  InputFile *ifile = &input_files[i];
2585  for (j = 0; j < ifile->ctx->nb_programs; j++) {
2586  AVProgram *p = ifile->ctx->programs[j];
2587  int discard = AVDISCARD_ALL;
2588 
2589  for (k = 0; k < p->nb_stream_indexes; k++)
2590  if (!input_streams[ifile->ist_index + p->stream_index[k]].discard) {
2591  discard = AVDISCARD_DEFAULT;
2592  break;
2593  }
2594  p->discard = discard;
2595  }
2596  }
2597 
2598  /* open files and write file headers */
2599  for (i = 0; i < nb_output_files; i++) {
2600  oc = output_files[i].ctx;
2601  oc->interrupt_callback = int_cb;
2602  if (avformat_write_header(oc, &output_files[i].opts) < 0) {
2603  snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i);
2604  ret = AVERROR(EINVAL);
2605  goto dump_format;
2606  }
2607  assert_avoptions(output_files[i].opts);
2608  if (strcmp(oc->oformat->name, "rtp")) {
2609  want_sdp = 0;
2610  }
2611  }
2612 
2613  dump_format:
2614  /* dump the file output parameters - cannot be done before in case
2615  of stream copy */
2616  for (i = 0; i < nb_output_files; i++) {
2617  av_dump_format(output_files[i].ctx, i, output_files[i].ctx->filename, 1);
2618  }
2619 
2620  /* dump the stream mapping */
2621  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
2622  for (i = 0; i < nb_output_streams; i++) {
2623  ost = &output_streams[i];
2624 
2625  if (ost->attachment_filename) {
2626  /* an attached file */
2627  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
2628  ost->attachment_filename, ost->file_index, ost->index);
2629  continue;
2630  }
2631  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
2632  input_streams[ost->source_index].file_index,
2633  input_streams[ost->source_index].st->index,
2634  ost->file_index,
2635  ost->index);
2636  if (ost->sync_ist != &input_streams[ost->source_index])
2637  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
2638  ost->sync_ist->file_index,
2639  ost->sync_ist->st->index);
2640  if (ost->stream_copy)
2641  av_log(NULL, AV_LOG_INFO, " (copy)");
2642  else
2643  av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index].dec ?
2644  input_streams[ost->source_index].dec->name : "?",
2645  ost->enc ? ost->enc->name : "?");
2646  av_log(NULL, AV_LOG_INFO, "\n");
2647  }
2648 
2649  if (ret) {
2650  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
2651  return ret;
2652  }
2653 
2654  if (want_sdp) {
2655  print_sdp(output_files, nb_output_files);
2656  }
2657 
2658  return 0;
2659 }
2660 
2661 /*
2662  * The following code is the main loop of the file converter
2663  */
2664 static int transcode(OutputFile *output_files,
2665  int nb_output_files,
2666  InputFile *input_files,
2667  int nb_input_files)
2668 {
2669  int ret, i;
2670  AVFormatContext *is, *os;
2671  OutputStream *ost;
2672  InputStream *ist;
2673  uint8_t *no_packet;
2674  int no_packet_count = 0;
2675  int64_t timer_start;
2676 
2677  if (!(no_packet = av_mallocz(nb_input_files)))
2678  exit_program(1);
2679 
2680  ret = transcode_init(output_files, nb_output_files, input_files, nb_input_files);
2681  if (ret < 0)
2682  goto fail;
2683 
2684  av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
2685  term_init();
2686 
2687  timer_start = av_gettime();
2688 
2689  for (; received_sigterm == 0;) {
2690  int file_index, ist_index;
2691  AVPacket pkt;
2692  int64_t ipts_min;
2693  double opts_min;
2694 
2695  ipts_min = INT64_MAX;
2696  opts_min = 1e100;
2697 
2698  /* select the stream that we must read now by looking at the
2699  smallest output pts */
2700  file_index = -1;
2701  for (i = 0; i < nb_output_streams; i++) {
2702  OutputFile *of;
2703  int64_t ipts;
2704  double opts;
2705  ost = &output_streams[i];
2706  of = &output_files[ost->file_index];
2707  os = output_files[ost->file_index].ctx;
2708  ist = &input_streams[ost->source_index];
2709  if (ost->is_past_recording_time || no_packet[ist->file_index] ||
2710  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
2711  continue;
2712  opts = ost->st->pts.val * av_q2d(ost->st->time_base);
2713  ipts = ist->pts;
2714  if (!input_files[ist->file_index].eof_reached) {
2715  if (ipts < ipts_min) {
2716  ipts_min = ipts;
2717  if (input_sync)
2718  file_index = ist->file_index;
2719  }
2720  if (opts < opts_min) {
2721  opts_min = opts;
2722  if (!input_sync) file_index = ist->file_index;
2723  }
2724  }
2725  if (ost->frame_number >= ost->max_frames) {
2726  int j;
2727  for (j = 0; j < of->ctx->nb_streams; j++)
2728  output_streams[of->ost_index + j].is_past_recording_time = 1;
2729  continue;
2730  }
2731  }
2732  /* if none, the input is finished */
2733  if (file_index < 0) {
2734  if (no_packet_count) {
2735  no_packet_count = 0;
2736  memset(no_packet, 0, nb_input_files);
2737  usleep(10000);
2738  continue;
2739  }
2740  break;
2741  }
2742 
2743  /* read a frame from it and output it in the fifo */
2744  is = input_files[file_index].ctx;
2745  ret = av_read_frame(is, &pkt);
2746  if (ret == AVERROR(EAGAIN)) {
2747  no_packet[file_index] = 1;
2748  no_packet_count++;
2749  continue;
2750  }
2751  if (ret < 0) {
2752  input_files[file_index].eof_reached = 1;
2753  if (opt_shortest)
2754  break;
2755  else
2756  continue;
2757  }
2758 
2759  no_packet_count = 0;
2760  memset(no_packet, 0, nb_input_files);
2761 
2762  if (do_pkt_dump) {
2763  av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
2764  is->streams[pkt.stream_index]);
2765  }
2766  /* the following test is needed in case new streams appear
2767  dynamically in the stream: we ignore them */
2768  if (pkt.stream_index >= input_files[file_index].nb_streams)
2769  goto discard_packet;
2770  ist_index = input_files[file_index].ist_index + pkt.stream_index;
2771  ist = &input_streams[ist_index];
2772  if (ist->discard)
2773  goto discard_packet;
2774 
2775  if (pkt.dts != AV_NOPTS_VALUE)
2776  pkt.dts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2777  if (pkt.pts != AV_NOPTS_VALUE)
2778  pkt.pts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2779 
2780  if (pkt.pts != AV_NOPTS_VALUE)
2781  pkt.pts *= ist->ts_scale;
2782  if (pkt.dts != AV_NOPTS_VALUE)
2783  pkt.dts *= ist->ts_scale;
2784 
2785  //fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n",
2786  // ist->next_pts,
2787  // pkt.dts, input_files[ist->file_index].ts_offset,
2788  // ist->st->codec->codec_type);
2789  if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE
2790  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
2791  int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
2792  int64_t delta = pkt_dts - ist->next_pts;
2793  if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->pts) && !copy_ts) {
2794  input_files[ist->file_index].ts_offset -= delta;
2795  av_log(NULL, AV_LOG_DEBUG,
2796  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
2797  delta, input_files[ist->file_index].ts_offset);
2798  pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2799  if (pkt.pts != AV_NOPTS_VALUE)
2800  pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2801  }
2802  }
2803 
2804  // fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
2805  if (output_packet(ist, output_streams, nb_output_streams, &pkt) < 0) {
2806 
2807  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
2808  ist->file_index, ist->st->index);
2809  if (exit_on_error)
2810  exit_program(1);
2811  av_free_packet(&pkt);
2812  continue;
2813  }
2814 
2815  discard_packet:
2816  av_free_packet(&pkt);
2817 
2818  /* dump report by using the output first video and audio streams */
2819  print_report(output_files, output_streams, nb_output_streams, 0, timer_start);
2820  }
2821 
2822  /* at the end of stream, we must flush the decoder buffers */
2823  for (i = 0; i < nb_input_streams; i++) {
2824  ist = &input_streams[i];
2825  if (ist->decoding_needed) {
2826  output_packet(ist, output_streams, nb_output_streams, NULL);
2827  }
2828  }
2829  flush_encoders(output_streams, nb_output_streams);
2830 
2831  term_exit();
2832 
2833  /* write the trailer if needed and close file */
2834  for (i = 0; i < nb_output_files; i++) {
2835  os = output_files[i].ctx;
2836  av_write_trailer(os);
2837  }
2838 
2839  /* dump report by using the first video and audio streams */
2840  print_report(output_files, output_streams, nb_output_streams, 1, timer_start);
2841 
2842  /* close each encoder */
2843  for (i = 0; i < nb_output_streams; i++) {
2844  ost = &output_streams[i];
2845  if (ost->encoding_needed) {
2846  av_freep(&ost->st->codec->stats_in);
2847  avcodec_close(ost->st->codec);
2848  }
2849 #if CONFIG_AVFILTER
2850  avfilter_graph_free(&ost->graph);
2851 #endif
2852  }
2853 
2854  /* close each decoder */
2855  for (i = 0; i < nb_input_streams; i++) {
2856  ist = &input_streams[i];
2857  if (ist->decoding_needed) {
2858  avcodec_close(ist->st->codec);
2859  }
2860  }
2861 
2862  /* finished ! */
2863  ret = 0;
2864 
2865  fail:
2866  av_freep(&bit_buffer);
2867  av_freep(&no_packet);
2868 
2869  if (output_streams) {
2870  for (i = 0; i < nb_output_streams; i++) {
2871  ost = &output_streams[i];
2872  if (ost) {
2873  if (ost->stream_copy)
2874  av_freep(&ost->st->codec->extradata);
2875  if (ost->logfile) {
2876  fclose(ost->logfile);
2877  ost->logfile = NULL;
2878  }
2879  av_fifo_free(ost->fifo); /* works even if fifo is not
2880  initialized but set to zero */
2881  av_freep(&ost->st->codec->subtitle_header);
2882  av_free(ost->pict_tmp.data[0]);
2883  av_free(ost->forced_kf_pts);
2884  if (ost->video_resample)
2885  sws_freeContext(ost->img_resample_ctx);
2886  if (ost->resample)
2887  audio_resample_close(ost->resample);
2888  if (ost->reformat_ctx)
2889  av_audio_convert_free(ost->reformat_ctx);
2890  av_dict_free(&ost->opts);
2891  }
2892  }
2893  }
2894  return ret;
2895 }
2896 
2897 static double parse_frame_aspect_ratio(const char *arg)
2898 {
2899  int x = 0, y = 0;
2900  double ar = 0;
2901  const char *p;
2902  char *end;
2903 
2904  p = strchr(arg, ':');
2905  if (p) {
2906  x = strtol(arg, &end, 10);
2907  if (end == p)
2908  y = strtol(end + 1, &end, 10);
2909  if (x > 0 && y > 0)
2910  ar = (double)x / (double)y;
2911  } else
2912  ar = strtod(arg, NULL);
2913 
2914  if (!ar) {
2915  av_log(NULL, AV_LOG_FATAL, "Incorrect aspect ratio specification.\n");
2916  exit_program(1);
2917  }
2918  return ar;
2919 }
2920 
2921 static int opt_audio_codec(OptionsContext *o, const char *opt, const char *arg)
2922 {
2923  return parse_option(o, "codec:a", arg, options);
2924 }
2925 
2926 static int opt_video_codec(OptionsContext *o, const char *opt, const char *arg)
2927 {
2928  return parse_option(o, "codec:v", arg, options);
2929 }
2930 
2931 static int opt_subtitle_codec(OptionsContext *o, const char *opt, const char *arg)
2932 {
2933  return parse_option(o, "codec:s", arg, options);
2934 }
2935 
2936 static int opt_data_codec(OptionsContext *o, const char *opt, const char *arg)
2937 {
2938  return parse_option(o, "codec:d", arg, options);
2939 }
2940 
2941 static int opt_map(OptionsContext *o, const char *opt, const char *arg)
2942 {
2943  StreamMap *m = NULL;
2944  int i, negative = 0, file_idx;
2945  int sync_file_idx = -1, sync_stream_idx;
2946  char *p, *sync;
2947  char *map;
2948 
2949  if (*arg == '-') {
2950  negative = 1;
2951  arg++;
2952  }
2953  map = av_strdup(arg);
2954 
2955  /* parse sync stream first, just pick first matching stream */
2956  if (sync = strchr(map, ',')) {
2957  *sync = 0;
2958  sync_file_idx = strtol(sync + 1, &sync, 0);
2959  if (sync_file_idx >= nb_input_files || sync_file_idx < 0) {
2960  av_log(NULL, AV_LOG_FATAL, "Invalid sync file index: %d.\n", sync_file_idx);
2961  exit_program(1);
2962  }
2963  if (*sync)
2964  sync++;
2965  for (i = 0; i < input_files[sync_file_idx].nb_streams; i++)
2966  if (check_stream_specifier(input_files[sync_file_idx].ctx,
2967  input_files[sync_file_idx].ctx->streams[i], sync) == 1) {
2968  sync_stream_idx = i;
2969  break;
2970  }
2971  if (i == input_files[sync_file_idx].nb_streams) {
2972  av_log(NULL, AV_LOG_FATAL, "Sync stream specification in map %s does not "
2973  "match any streams.\n", arg);
2974  exit_program(1);
2975  }
2976  }
2977 
2978 
2979  file_idx = strtol(map, &p, 0);
2980  if (file_idx >= nb_input_files || file_idx < 0) {
2981  av_log(NULL, AV_LOG_FATAL, "Invalid input file index: %d.\n", file_idx);
2982  exit_program(1);
2983  }
2984  if (negative)
2985  /* disable some already defined maps */
2986  for (i = 0; i < o->nb_stream_maps; i++) {
2987  m = &o->stream_maps[i];
2988  if (file_idx == m->file_index &&
2989  check_stream_specifier(input_files[m->file_index].ctx,
2990  input_files[m->file_index].ctx->streams[m->stream_index],
2991  *p == ':' ? p + 1 : p) > 0)
2992  m->disabled = 1;
2993  }
2994  else
2995  for (i = 0; i < input_files[file_idx].nb_streams; i++) {
2996  if (check_stream_specifier(input_files[file_idx].ctx, input_files[file_idx].ctx->streams[i],
2997  *p == ':' ? p + 1 : p) <= 0)
2998  continue;
2999  o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
3000  &o->nb_stream_maps, o->nb_stream_maps + 1);
3001  m = &o->stream_maps[o->nb_stream_maps - 1];
3002 
3003  m->file_index = file_idx;
3004  m->stream_index = i;
3005 
3006  if (sync_file_idx >= 0) {
3007  m->sync_file_index = sync_file_idx;
3008  m->sync_stream_index = sync_stream_idx;
3009  } else {
3010  m->sync_file_index = file_idx;
3011  m->sync_stream_index = i;
3012  }
3013  }
3014 
3015  if (!m) {
3016  av_log(NULL, AV_LOG_FATAL, "Stream map '%s' matches no streams.\n", arg);
3017  exit_program(1);
3018  }
3019 
3020  av_freep(&map);
3021  return 0;
3022 }
3023 
3024 static int opt_attach(OptionsContext *o, const char *opt, const char *arg)
3025 {
3026  o->attachments = grow_array(o->attachments, sizeof(*o->attachments),
3027  &o->nb_attachments, o->nb_attachments + 1);
3028  o->attachments[o->nb_attachments - 1] = arg;
3029  return 0;
3030 }
3031 
3032 /**
3033  * Parse a metadata specifier in arg.
3034  * @param type metadata type is written here -- g(lobal)/s(tream)/c(hapter)/p(rogram)
3035  * @param index for type c/p, chapter/program index is written here
3036  * @param stream_spec for type s, the stream specifier is written here
3037  */
3038  static void parse_meta_type(char *arg, char *type, int *index, const char **stream_spec)
3039 {
3040  if (*arg) {
3041  *type = *arg;
3042  switch (*arg) {
3043  case 'g':
3044  break;
3045  case 's':
3046  if (*(++arg) && *arg != ':') {
3047  av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", arg);
3048  exit_program(1);
3049  }
3050  *stream_spec = *arg == ':' ? arg + 1 : "";
3051  break;
3052  case 'c':
3053  case 'p':
3054  if (*(++arg) == ':')
3055  *index = strtol(++arg, NULL, 0);
3056  break;
3057  default:
3058  av_log(NULL, AV_LOG_FATAL, "Invalid metadata type %c.\n", *arg);
3059  exit_program(1);
3060  }
3061  } else
3062  *type = 'g';
3063 }
3064 
3065 static int copy_metadata(char *outspec, char *inspec, AVFormatContext *oc, AVFormatContext *ic, OptionsContext *o)
3066 {
3067  AVDictionary **meta_in = NULL;
3068  AVDictionary **meta_out;
3069  int i, ret = 0;
3070  char type_in, type_out;
3071  const char *istream_spec = NULL, *ostream_spec = NULL;
3072  int idx_in = 0, idx_out = 0;
3073 
3074  parse_meta_type(inspec, &type_in, &idx_in, &istream_spec);
3075  parse_meta_type(outspec, &type_out, &idx_out, &ostream_spec);
3076 
3077  if (type_in == 'g' || type_out == 'g')
3078  o->metadata_global_manual = 1;
3079  if (type_in == 's' || type_out == 's')
3080  o->metadata_streams_manual = 1;
3081  if (type_in == 'c' || type_out == 'c')
3082  o->metadata_chapters_manual = 1;
3083 
3084 #define METADATA_CHECK_INDEX(index, nb_elems, desc)\
3085  if ((index) < 0 || (index) >= (nb_elems)) {\
3086  av_log(NULL, AV_LOG_FATAL, "Invalid %s index %d while processing metadata maps.\n",\
3087  (desc), (index));\
3088  exit_program(1);\
3089  }
3090 
3091 #define SET_DICT(type, meta, context, index)\
3092  switch (type) {\
3093  case 'g':\
3094  meta = &context->metadata;\
3095  break;\
3096  case 'c':\
3097  METADATA_CHECK_INDEX(index, context->nb_chapters, "chapter")\
3098  meta = &context->chapters[index]->metadata;\
3099  break;\
3100  case 'p':\
3101  METADATA_CHECK_INDEX(index, context->nb_programs, "program")\
3102  meta = &context->programs[index]->metadata;\
3103  break;\
3104  case 's':\
3105  break; /* handled separately below */ \
3106  }\
3107 
3108  SET_DICT(type_in, meta_in, ic, idx_in);
3109  SET_DICT(type_out, meta_out, oc, idx_out);
3110 
3111  /* for input streams choose first matching stream */
3112  if (type_in == 's') {
3113  for (i = 0; i < ic->nb_streams; i++) {
3114  if ((ret = check_stream_specifier(ic, ic->streams[i], istream_spec)) > 0) {
3115  meta_in = &ic->streams[i]->metadata;
3116  break;
3117  } else if (ret < 0)
3118  exit_program(1);
3119  }
3120  if (!meta_in) {
3121  av_log(NULL, AV_LOG_FATAL, "Stream specifier %s does not match any streams.\n", istream_spec);
3122  exit_program(1);
3123  }
3124  }
3125 
3126  if (type_out == 's') {
3127  for (i = 0; i < oc->nb_streams; i++) {
3128  if ((ret = check_stream_specifier(oc, oc->streams[i], ostream_spec)) > 0) {
3129  meta_out = &oc->streams[i]->metadata;
3130  av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
3131  } else if (ret < 0)
3132  exit_program(1);
3133  }
3134  } else
3135  av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
3136 
3137  return 0;
3138 }
3139 
3140 static AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
3141 {
3142  const char *codec_string = encoder ? "encoder" : "decoder";
3143  AVCodec *codec;
3144 
3145  codec = encoder ?
3146  avcodec_find_encoder_by_name(name) :
3147  avcodec_find_decoder_by_name(name);
3148  if (!codec) {
3149  av_log(NULL, AV_LOG_FATAL, "Unknown %s '%s'\n", codec_string, name);
3150  exit_program(1);
3151  }
3152  if (codec->type != type) {
3153  av_log(NULL, AV_LOG_FATAL, "Invalid %s type '%s'\n", codec_string, name);
3154  exit_program(1);
3155  }
3156  return codec;
3157 }
3158 
3159 static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st)
3160 {
3161  char *codec_name = NULL;
3162 
3163  MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, st);
3164  if (codec_name) {
3165  AVCodec *codec = find_codec_or_die(codec_name, st->codec->codec_type, 0);
3166  st->codec->codec_id = codec->id;
3167  return codec;
3168  } else
3169  return avcodec_find_decoder(st->codec->codec_id);
3170 }
3171 
3172 /**
3173  * Add all the streams from the given input file to the global
3174  * list of input streams.
3175  */
3176 static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
3177 {
3178  int i;
3179 
3180  for (i = 0; i < ic->nb_streams; i++) {
3181  AVStream *st = ic->streams[i];
3182  AVCodecContext *dec = st->codec;
3183  InputStream *ist;
3184 
3185  input_streams = grow_array(input_streams, sizeof(*input_streams), &nb_input_streams, nb_input_streams + 1);
3186  ist = &input_streams[nb_input_streams - 1];
3187  ist->st = st;
3188  ist->file_index = nb_input_files;
3189  ist->discard = 1;
3190  ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, ic, st);
3191 
3192  ist->ts_scale = 1.0;
3193  MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st);
3194 
3195  ist->dec = choose_decoder(o, ic, st);
3196 
3197  switch (dec->codec_type) {
3198  case AVMEDIA_TYPE_AUDIO:
3199  if (o->audio_disable)
3200  st->discard = AVDISCARD_ALL;
3201  break;
3202  case AVMEDIA_TYPE_VIDEO:
3203  if (dec->lowres) {
3204  dec->flags |= CODEC_FLAG_EMU_EDGE;
3205  dec->height >>= dec->lowres;
3206  dec->width >>= dec->lowres;
3207  }
3208 
3209  if (o->video_disable)
3210  st->discard = AVDISCARD_ALL;
3211  else if (video_discard)
3212  st->discard = video_discard;
3213  break;
3214  case AVMEDIA_TYPE_DATA:
3215  break;
3216  case AVMEDIA_TYPE_SUBTITLE:
3217  if (o->subtitle_disable)
3218  st->discard = AVDISCARD_ALL;
3219  break;
3220  case AVMEDIA_TYPE_ATTACHMENT:
3221  case AVMEDIA_TYPE_UNKNOWN:
3222  break;
3223  default:
3224  abort();
3225  }
3226  }
3227 }
3228 
3229 static void assert_file_overwrite(const char *filename)
3230 {
3231  if (!file_overwrite &&
3232  (strchr(filename, ':') == NULL || filename[1] == ':' ||
3233  av_strstart(filename, "file:", NULL))) {
3234  if (avio_check(filename, 0) == 0) {
3235  if (!using_stdin) {
3236  fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
3237  fflush(stderr);
3238  if (!read_yesno()) {
3239  fprintf(stderr, "Not overwriting - exiting\n");
3240  exit_program(1);
3241  }
3242  }
3243  else {
3244  fprintf(stderr,"File '%s' already exists. Exiting.\n", filename);
3245  exit_program(1);
3246  }
3247  }
3248  }
3249 }
3250 
3251 static void dump_attachment(AVStream *st, const char *filename)
3252 {
3253  int ret;
3254  AVIOContext *out = NULL;
3255  AVDictionaryEntry *e;
3256 
3257  if (!st->codec->extradata_size) {
3258  av_log(NULL, AV_LOG_WARNING, "No extradata to dump in stream #%d:%d.\n",
3259  nb_input_files - 1, st->index);
3260  return;
3261  }
3262  if (!*filename && (e = av_dict_get(st->metadata, "filename", NULL, 0)))
3263  filename = e->value;
3264  if (!*filename) {
3265  av_log(NULL, AV_LOG_FATAL, "No filename specified and no 'filename' tag"
3266  "in stream #%d:%d.\n", nb_input_files - 1, st->index);
3267  exit_program(1);
3268  }
3269 
3270  assert_file_overwrite(filename);
3271 
3272  if ((ret = avio_open2(&out, filename, AVIO_FLAG_WRITE, &int_cb, NULL)) < 0) {
3273  av_log(NULL, AV_LOG_FATAL, "Could not open file %s for writing.\n",
3274  filename);
3275  exit_program(1);
3276  }
3277 
3278  avio_write(out, st->codec->extradata, st->codec->extradata_size);
3279  avio_flush(out);
3280  avio_close(out);
3281 }
3282 
3283 static int opt_input_file(OptionsContext *o, const char *opt, const char *filename)
3284 {
3285  AVFormatContext *ic;
3286  AVInputFormat *file_iformat = NULL;
3287  int err, i, ret;
3288  int64_t timestamp;
3289  uint8_t buf[128];
3290  AVDictionary **opts;
3291  int orig_nb_streams; // number of streams before avformat_find_stream_info
3292 
3293  if (o->format) {
3294  if (!(file_iformat = av_find_input_format(o->format))) {
3295  av_log(NULL, AV_LOG_FATAL, "Unknown input format: '%s'\n", o->format);
3296  exit_program(1);
3297  }
3298  }
3299 
3300  if (!strcmp(filename, "-"))
3301  filename = "pipe:";
3302 
3303  using_stdin |= !strncmp(filename, "pipe:", 5) ||
3304  !strcmp(filename, "/dev/stdin");
3305 
3306  /* get default parameters from command line */
3307  ic = avformat_alloc_context();
3308  if (!ic) {
3309  print_error(filename, AVERROR(ENOMEM));
3310  exit_program(1);
3311  }
3312  if (o->nb_audio_sample_rate) {
3313  snprintf(buf, sizeof(buf), "%d", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i);
3314  av_dict_set(&format_opts, "sample_rate", buf, 0);
3315  }
3316  if (o->nb_audio_channels) {
3317  snprintf(buf, sizeof(buf), "%d", o->audio_channels[o->nb_audio_channels - 1].u.i);
3318  av_dict_set(&format_opts, "channels", buf, 0);
3319  }
3320  if (o->nb_frame_rates) {
3321  av_dict_set(&format_opts, "framerate", o->frame_rates[o->nb_frame_rates - 1].u.str, 0);
3322  }
3323  if (o->nb_frame_sizes) {
3324  av_dict_set(&format_opts, "video_size", o->frame_sizes[o->nb_frame_sizes - 1].u.str, 0);
3325  }
3326  if (o->nb_frame_pix_fmts)
3327  av_dict_set(&format_opts, "pixel_format", o->frame_pix_fmts[o->nb_frame_pix_fmts - 1].u.str, 0);
3328 
3329  ic->flags |= AVFMT_FLAG_NONBLOCK;
3330  ic->interrupt_callback = int_cb;
3331 
3332  /* open the input file with generic libav function */
3333  err = avformat_open_input(&ic, filename, file_iformat, &format_opts);
3334  if (err < 0) {
3335  print_error(filename, err);
3336  exit_program(1);
3337  }
3338  assert_avoptions(format_opts);
3339 
3340  /* apply forced codec ids */
3341  for (i = 0; i < ic->nb_streams; i++)
3342  choose_decoder(o, ic, ic->streams[i]);
3343 
3344  /* Set AVCodecContext options for avformat_find_stream_info */
3345  opts = setup_find_stream_info_opts(ic, codec_opts);
3346  orig_nb_streams = ic->nb_streams;
3347 
3348  /* If not enough info to get the stream parameters, we decode the
3349  first frames to get it. (used in mpeg case for example) */
3350  ret = avformat_find_stream_info(ic, opts);
3351  if (ret < 0) {
3352  av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
3353  avformat_close_input(&ic);
3354  exit_program(1);
3355  }
3356 
3357  timestamp = o->start_time;
3358  /* add the stream start time */
3359  if (ic->start_time != AV_NOPTS_VALUE)
3360  timestamp += ic->start_time;
3361 
3362  /* if seeking requested, we execute it */
3363  if (o->start_time != 0) {
3364  ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
3365  if (ret < 0) {
3366  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
3367  filename, (double)timestamp / AV_TIME_BASE);
3368  }
3369  }
3370 
3371  /* update the current parameters so that they match the one of the input stream */
3372  add_input_streams(o, ic);
3373 
3374  /* dump the file content */
3375  av_dump_format(ic, nb_input_files, filename, 0);
3376 
3377  input_files = grow_array(input_files, sizeof(*input_files), &nb_input_files, nb_input_files + 1);
3378  input_files[nb_input_files - 1].ctx = ic;
3379  input_files[nb_input_files - 1].ist_index = nb_input_streams - ic->nb_streams;
3380  input_files[nb_input_files - 1].ts_offset = o->input_ts_offset - (copy_ts ? 0 : timestamp);
3381  input_files[nb_input_files - 1].nb_streams = ic->nb_streams;
3382  input_files[nb_input_files - 1].rate_emu = o->rate_emu;
3383 
3384  for (i = 0; i < o->nb_dump_attachment; i++) {
3385  int j;
3386 
3387  for (j = 0; j < ic->nb_streams; j++) {
3388  AVStream *st = ic->streams[j];
3389 
3390  if (check_stream_specifier(ic, st, o->dump_attachment[i].specifier) == 1)
3391  dump_attachment(st, o->dump_attachment[i].u.str);
3392  }
3393  }
3394 
3395  for (i = 0; i < orig_nb_streams; i++)
3396  av_dict_free(&opts[i]);
3397  av_freep(&opts);
3398 
3399  reset_options(o);
3400  return 0;
3401 }
3402 
3403 static uint8_t *get_line(AVIOContext *s)
3404 {
3405  AVIOContext *line;
3406  uint8_t *buf;
3407  char c;
3408 
3409  if (avio_open_dyn_buf(&line) < 0) {
3410  av_log(NULL, AV_LOG_FATAL, "Could not alloc buffer for reading preset.\n");
3411  exit_program(1);
3412  }
3413 
3414  while ((c = avio_r8(s)) && c != '\n')
3415  avio_w8(line, c);
3416  avio_w8(line, 0);
3417  avio_close_dyn_buf(line, &buf);
3418 
3419  return buf;
3420 }
3421 
3422 static int get_preset_file_2(const char *preset_name, const char *codec_name, AVIOContext **s)
3423 {
3424  int i, ret = 1;
3425  char filename[1000];
3426  const char *base[3] = { getenv("AVCONV_DATADIR"),
3427  getenv("HOME"),
3429  };
3430 
3431  for (i = 0; i < FF_ARRAY_ELEMS(base) && ret; i++) {
3432  if (!base[i])
3433  continue;
3434  if (codec_name) {
3435  snprintf(filename, sizeof(filename), "%s%s/%s-%s.avpreset", base[i],
3436  i != 1 ? "" : "/.avconv", codec_name, preset_name);
3437  ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
3438  }
3439  if (ret) {
3440  snprintf(filename, sizeof(filename), "%s%s/%s.avpreset", base[i],
3441  i != 1 ? "" : "/.avconv", preset_name);
3442  ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
3443  }
3444  }
3445  return ret;
3446 }
3447 
3448 static void choose_encoder(OptionsContext *o, AVFormatContext *s, OutputStream *ost)
3449 {
3450  char *codec_name = NULL;
3451 
3452  MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, ost->st);
3453  if (!codec_name) {
3454  ost->st->codec->codec_id = av_guess_codec(s->oformat, NULL, s->filename,
3455  NULL, ost->st->codec->codec_type);
3456  ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
3457  } else if (!strcmp(codec_name, "copy"))
3458  ost->stream_copy = 1;
3459  else {
3460  ost->enc = find_codec_or_die(codec_name, ost->st->codec->codec_type, 1);
3461  ost->st->codec->codec_id = ost->enc->id;
3462  }
3463 }
3464 
3465 static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVMediaType type)
3466 {
3467  OutputStream *ost;
3468  AVStream *st = avformat_new_stream(oc, NULL);
3469  int idx = oc->nb_streams - 1, ret = 0;
3470  char *bsf = NULL, *next, *codec_tag = NULL;
3471  AVBitStreamFilterContext *bsfc, *bsfc_prev = NULL;
3472  double qscale = -1;
3473 
3474  if (!st) {
3475  av_log(NULL, AV_LOG_FATAL, "Could not alloc stream.\n");
3476  exit_program(1);
3477  }
3478 
3479  if (oc->nb_streams - 1 < o->nb_streamid_map)
3480  st->id = o->streamid_map[oc->nb_streams - 1];
3481 
3482  output_streams = grow_array(output_streams, sizeof(*output_streams), &nb_output_streams,
3483  nb_output_streams + 1);
3484  ost = &output_streams[nb_output_streams - 1];
3485  ost->file_index = nb_output_files;
3486  ost->index = idx;
3487  ost->st = st;
3488  st->codec->codec_type = type;
3489  choose_encoder(o, oc, ost);
3490  if (ost->enc) {
3491  AVIOContext *s = NULL;
3492  char *buf = NULL, *arg = NULL, *preset = NULL;
3493 
3494  ost->opts = filter_codec_opts(codec_opts, ost->enc->id, oc, st);
3495 
3496  MATCH_PER_STREAM_OPT(presets, str, preset, oc, st);
3497  if (preset && (!(ret = get_preset_file_2(preset, ost->enc->name, &s)))) {
3498  do {
3499  buf = get_line(s);
3500  if (!buf[0] || buf[0] == '#') {
3501  av_free(buf);
3502  continue;
3503  }
3504  if (!(arg = strchr(buf, '='))) {
3505  av_log(NULL, AV_LOG_FATAL, "Invalid line found in the preset file.\n");
3506  exit_program(1);
3507  }
3508  *arg++ = 0;
3509  av_dict_set(&ost->opts, buf, arg, AV_DICT_DONT_OVERWRITE);
3510  av_free(buf);
3511  } while (!s->eof_reached);
3512  avio_close(s);
3513  }
3514  if (ret) {
3515  av_log(NULL, AV_LOG_FATAL,
3516  "Preset %s specified for stream %d:%d, but could not be opened.\n",
3517  preset, ost->file_index, ost->index);
3518  exit_program(1);
3519  }
3520  }
3521 
3522  avcodec_get_context_defaults3(st->codec, ost->enc);
3523  st->codec->codec_type = type; // XXX hack, avcodec_get_context_defaults2() sets type to unknown for stream copy
3524 
3525  ost->max_frames = INT64_MAX;
3526  MATCH_PER_STREAM_OPT(max_frames, i64, ost->max_frames, oc, st);
3527 
3528  MATCH_PER_STREAM_OPT(bitstream_filters, str, bsf, oc, st);
3529  while (bsf) {
3530  if (next = strchr(bsf, ','))
3531  *next++ = 0;
3532  if (!(bsfc = av_bitstream_filter_init(bsf))) {
3533  av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf);
3534  exit_program(1);
3535  }
3536  if (bsfc_prev)
3537  bsfc_prev->next = bsfc;
3538  else
3539  ost->bitstream_filters = bsfc;
3540 
3541  bsfc_prev = bsfc;
3542  bsf = next;
3543  }
3544 
3545  MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, oc, st);
3546  if (codec_tag) {
3547  uint32_t tag = strtol(codec_tag, &next, 0);
3548  if (*next)
3549  tag = AV_RL32(codec_tag);
3550  st->codec->codec_tag = tag;
3551  }
3552 
3553  MATCH_PER_STREAM_OPT(qscale, dbl, qscale, oc, st);
3554  if (qscale >= 0 || same_quant) {
3555  st->codec->flags |= CODEC_FLAG_QSCALE;
3556  st->codec->global_quality = FF_QP2LAMBDA * qscale;
3557  }
3558 
3559  if (oc->oformat->flags & AVFMT_GLOBALHEADER)
3560  st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
3561 
3562  av_opt_get_int(sws_opts, "sws_flags", 0, &ost->sws_flags);
3563  return ost;
3564 }
3565 
3566 static void parse_matrix_coeffs(uint16_t *dest, const char *str)
3567 {
3568  int i;
3569  const char *p = str;
3570  for (i = 0;; i++) {
3571  dest[i] = atoi(p);
3572  if (i == 63)
3573  break;
3574  p = strchr(p, ',');
3575  if (!p) {
3576  av_log(NULL, AV_LOG_FATAL, "Syntax error in matrix \"%s\" at coeff %d\n", str, i);
3577  exit_program(1);
3578  }
3579  p++;
3580  }
3581 }
3582 
3583 static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc)
3584 {
3585  AVStream *st;
3586  OutputStream *ost;
3587  AVCodecContext *video_enc;
3588 
3589  ost = new_output_stream(o, oc, AVMEDIA_TYPE_VIDEO);
3590  st = ost->st;
3591  video_enc = st->codec;
3592 
3593  if (!ost->stream_copy) {
3594  const char *p = NULL;
3595  char *frame_rate = NULL, *frame_size = NULL;
3596  char *frame_aspect_ratio = NULL, *frame_pix_fmt = NULL;
3597  char *intra_matrix = NULL, *inter_matrix = NULL, *filters = NULL;
3598  int i;
3599 
3600  MATCH_PER_STREAM_OPT(frame_rates, str, frame_rate, oc, st);
3601  if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) {
3602  av_log(NULL, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
3603  exit_program(1);
3604  }
3605 
3606  MATCH_PER_STREAM_OPT(frame_sizes, str, frame_size, oc, st);
3607  if (frame_size && av_parse_video_size(&video_enc->width, &video_enc->height, frame_size) < 0) {
3608  av_log(NULL, AV_LOG_FATAL, "Invalid frame size: %s.\n", frame_size);
3609  exit_program(1);
3610  }
3611 
3612  MATCH_PER_STREAM_OPT(frame_aspect_ratios, str, frame_aspect_ratio, oc, st);
3613  if (frame_aspect_ratio)
3614  ost->frame_aspect_ratio = parse_frame_aspect_ratio(frame_aspect_ratio);
3615 
3616  MATCH_PER_STREAM_OPT(frame_pix_fmts, str, frame_pix_fmt, oc, st);
3617  if (frame_pix_fmt && (video_enc->pix_fmt = av_get_pix_fmt(frame_pix_fmt)) == PIX_FMT_NONE) {
3618  av_log(NULL, AV_LOG_FATAL, "Unknown pixel format requested: %s.\n", frame_pix_fmt);
3619  exit_program(1);
3620  }
3621  st->sample_aspect_ratio = video_enc->sample_aspect_ratio;
3622 
3623  MATCH_PER_STREAM_OPT(intra_matrices, str, intra_matrix, oc, st);
3624  if (intra_matrix) {
3625  if (!(video_enc->intra_matrix = av_mallocz(sizeof(*video_enc->intra_matrix) * 64))) {
3626  av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for intra matrix.\n");
3627  exit_program(1);
3628  }
3629  parse_matrix_coeffs(video_enc->intra_matrix, intra_matrix);
3630  }
3631  MATCH_PER_STREAM_OPT(inter_matrices, str, inter_matrix, oc, st);
3632  if (inter_matrix) {
3633  if (!(video_enc->inter_matrix = av_mallocz(sizeof(*video_enc->inter_matrix) * 64))) {
3634  av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for inter matrix.\n");
3635  exit_program(1);
3636  }
3637  parse_matrix_coeffs(video_enc->inter_matrix, inter_matrix);
3638  }
3639 
3640  MATCH_PER_STREAM_OPT(rc_overrides, str, p, oc, st);
3641  for (i = 0; p; i++) {
3642  int start, end, q;
3643  int e = sscanf(p, "%d,%d,%d", &start, &end, &q);
3644  if (e != 3) {
3645  av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
3646  exit_program(1);
3647  }
3648  video_enc->rc_override =
3649  av_realloc(video_enc->rc_override,
3650  sizeof(RcOverride) * (i + 1));
3651  video_enc->rc_override[i].start_frame = start;
3652  video_enc->rc_override[i].end_frame = end;
3653  if (q > 0) {
3654  video_enc->rc_override[i].qscale = q;
3655  video_enc->rc_override[i].quality_factor = 1.0;
3656  }
3657  else {
3658  video_enc->rc_override[i].qscale = 0;
3659  video_enc->rc_override[i].quality_factor = -q/100.0;
3660  }
3661  p = strchr(p, '/');
3662  if (p) p++;
3663  }
3664  video_enc->rc_override_count = i;
3665  if (!video_enc->rc_initial_buffer_occupancy)
3666  video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size * 3 / 4;
3667  video_enc->intra_dc_precision = intra_dc_precision - 8;
3668 
3669  /* two pass mode */
3670  if (do_pass) {
3671  if (do_pass == 1) {
3672  video_enc->flags |= CODEC_FLAG_PASS1;
3673  } else {
3674  video_enc->flags |= CODEC_FLAG_PASS2;
3675  }
3676  }
3677 
3678  MATCH_PER_STREAM_OPT(forced_key_frames, str, ost->forced_keyframes, oc, st);
3679  if (ost->forced_keyframes)
3680  ost->forced_keyframes = av_strdup(ost->forced_keyframes);
3681 
3682  MATCH_PER_STREAM_OPT(force_fps, i, ost->force_fps, oc, st);
3683 
3684  ost->top_field_first = -1;
3685  MATCH_PER_STREAM_OPT(top_field_first, i, ost->top_field_first, oc, st);
3686 
3687 #if CONFIG_AVFILTER
3688  MATCH_PER_STREAM_OPT(filters, str, filters, oc, st);
3689  if (filters)
3690  ost->avfilter = av_strdup(filters);
3691 #endif
3692  } else {
3693  MATCH_PER_STREAM_OPT(copy_initial_nonkeyframes, i, ost->copy_initial_nonkeyframes, oc, st);
3694  }
3695 
3696  return ost;
3697 }
3698 
3699 static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc)
3700 {
3701  AVStream *st;
3702  OutputStream *ost;
3703  AVCodecContext *audio_enc;
3704 
3705  ost = new_output_stream(o, oc, AVMEDIA_TYPE_AUDIO);
3706  st = ost->st;
3707 
3708  audio_enc = st->codec;
3709  audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;
3710 
3711  if (!ost->stream_copy) {
3712  char *sample_fmt = NULL;
3713 
3714  MATCH_PER_STREAM_OPT(audio_channels, i, audio_enc->channels, oc, st);
3715 
3716  MATCH_PER_STREAM_OPT(sample_fmts, str, sample_fmt, oc, st);
3717  if (sample_fmt &&
3718  (audio_enc->sample_fmt = av_get_sample_fmt(sample_fmt)) == AV_SAMPLE_FMT_NONE) {
3719  av_log(NULL, AV_LOG_FATAL, "Invalid sample format '%s'\n", sample_fmt);
3720  exit_program(1);
3721  }
3722 
3723  MATCH_PER_STREAM_OPT(audio_sample_rate, i, audio_enc->sample_rate, oc, st);
3724  }
3725 
3726  return ost;
3727 }
3728 
3729 static OutputStream *new_data_stream(OptionsContext *o, AVFormatContext *oc)
3730 {
3731  OutputStream *ost;
3732 
3733  ost = new_output_stream(o, oc, AVMEDIA_TYPE_DATA);
3734  if (!ost->stream_copy) {
3735  av_log(NULL, AV_LOG_FATAL, "Data stream encoding not supported yet (only streamcopy)\n");
3736  exit_program(1);
3737  }
3738 
3739  return ost;
3740 }
3741 
3742 static OutputStream *new_attachment_stream(OptionsContext *o, AVFormatContext *oc)
3743 {
3744  OutputStream *ost = new_output_stream(o, oc, AVMEDIA_TYPE_ATTACHMENT);
3745  ost->stream_copy = 1;
3746  return ost;
3747 }
3748 
3749 static OutputStream *new_subtitle_stream(OptionsContext *o, AVFormatContext *oc)
3750 {
3751  AVStream *st;
3752  OutputStream *ost;
3753  AVCodecContext *subtitle_enc;
3754 
3755  ost = new_output_stream(o, oc, AVMEDIA_TYPE_SUBTITLE);
3756  st = ost->st;
3757  subtitle_enc = st->codec;
3758 
3759  subtitle_enc->codec_type = AVMEDIA_TYPE_SUBTITLE;
3760 
3761  return ost;
3762 }
3763 
3764 /* arg format is "output-stream-index:streamid-value". */
3765 static int opt_streamid(OptionsContext *o, const char *opt, const char *arg)
3766 {
3767  int idx;
3768  char *p;
3769  char idx_str[16];
3770 
3771  av_strlcpy(idx_str, arg, sizeof(idx_str));
3772  p = strchr(idx_str, ':');
3773  if (!p) {
3774  av_log(NULL, AV_LOG_FATAL,
3775  "Invalid value '%s' for option '%s', required syntax is 'index:value'\n",
3776  arg, opt);
3777  exit_program(1);
3778  }
3779  *p++ = '\0';
3780  idx = parse_number_or_die(opt, idx_str, OPT_INT, 0, INT_MAX);
3781  o->streamid_map = grow_array(o->streamid_map, sizeof(*o->streamid_map), &o->nb_streamid_map, idx+1);
3782  o->streamid_map[idx] = parse_number_or_die(opt, p, OPT_INT, 0, INT_MAX);
3783  return 0;
3784 }
3785 
3786 static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata)
3787 {
3788  AVFormatContext *is = ifile->ctx;
3789  AVFormatContext *os = ofile->ctx;
3790  int i;
3791 
3792  for (i = 0; i < is->nb_chapters; i++) {
3793  AVChapter *in_ch = is->chapters[i], *out_ch;
3794  int64_t ts_off = av_rescale_q(ofile->start_time - ifile->ts_offset,
3795  AV_TIME_BASE_Q, in_ch->time_base);
3796  int64_t rt = (ofile->recording_time == INT64_MAX) ? INT64_MAX :
3797  av_rescale_q(ofile->recording_time, AV_TIME_BASE_Q, in_ch->time_base);
3798 
3799 
3800  if (in_ch->end < ts_off)
3801  continue;
3802  if (rt != INT64_MAX && in_ch->start > rt + ts_off)
3803  break;
3804 
3805  out_ch = av_mallocz(sizeof(AVChapter));
3806  if (!out_ch)
3807  return AVERROR(ENOMEM);
3808 
3809  out_ch->id = in_ch->id;
3810  out_ch->time_base = in_ch->time_base;
3811  out_ch->start = FFMAX(0, in_ch->start - ts_off);
3812  out_ch->end = FFMIN(rt, in_ch->end - ts_off);
3813 
3814  if (copy_metadata)
3815  av_dict_copy(&out_ch->metadata, in_ch->metadata, 0);
3816 
3817  os->nb_chapters++;
3818  os->chapters = av_realloc(os->chapters, sizeof(AVChapter) * os->nb_chapters);
3819  if (!os->chapters)
3820  return AVERROR(ENOMEM);
3821  os->chapters[os->nb_chapters - 1] = out_ch;
3822  }
3823  return 0;
3824 }
3825 
3826 static void opt_output_file(void *optctx, const char *filename)
3827 {
3828  OptionsContext *o = optctx;
3829  AVFormatContext *oc;
3830  int i, err;
3831  AVOutputFormat *file_oformat;
3832  OutputStream *ost;
3833  InputStream *ist;
3834 
3835  if (!strcmp(filename, "-"))
3836  filename = "pipe:";
3837 
3838  oc = avformat_alloc_context();
3839  if (!oc) {
3840  print_error(filename, AVERROR(ENOMEM));
3841  exit_program(1);
3842  }
3843 
3844  if (o->format) {
3845  file_oformat = av_guess_format(o->format, NULL, NULL);
3846  if (!file_oformat) {
3847  av_log(NULL, AV_LOG_FATAL, "Requested output format '%s' is not a suitable output format\n", o->format);
3848  exit_program(1);
3849  }
3850  } else {
3851  file_oformat = av_guess_format(NULL, filename, NULL);
3852  if (!file_oformat) {
3853  av_log(NULL, AV_LOG_FATAL, "Unable to find a suitable output format for '%s'\n",
3854  filename);
3855  exit_program(1);
3856  }
3857  }
3858 
3859  oc->oformat = file_oformat;
3860  oc->interrupt_callback = int_cb;
3861  av_strlcpy(oc->filename, filename, sizeof(oc->filename));
3862 
3863  if (!o->nb_stream_maps) {
3864  /* pick the "best" stream of each type */
3865 #define NEW_STREAM(type, index)\
3866  if (index >= 0) {\
3867  ost = new_ ## type ## _stream(o, oc);\
3868  ost->source_index = index;\
3869  ost->sync_ist = &input_streams[index];\
3870  input_streams[index].discard = 0;\
3871  }
3872 
3873  /* video: highest resolution */
3874  if (!o->video_disable && oc->oformat->video_codec != CODEC_ID_NONE) {
3875  int area = 0, idx = -1;
3876  for (i = 0; i < nb_input_streams; i++) {
3877  ist = &input_streams[i];
3878  if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
3879  ist->st->codec->width * ist->st->codec->height > area) {
3880  area = ist->st->codec->width * ist->st->codec->height;
3881  idx = i;
3882  }
3883  }
3884  NEW_STREAM(video, idx);
3885  }
3886 
3887  /* audio: most channels */
3888  if (!o->audio_disable && oc->oformat->audio_codec != CODEC_ID_NONE) {
3889  int channels = 0, idx = -1;
3890  for (i = 0; i < nb_input_streams; i++) {
3891  ist = &input_streams[i];
3892  if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
3893  ist->st->codec->channels > channels) {
3894  channels = ist->st->codec->channels;
3895  idx = i;
3896  }
3897  }
3898  NEW_STREAM(audio, idx);
3899  }
3900 
3901  /* subtitles: pick first */
3902  if (!o->subtitle_disable && oc->oformat->subtitle_codec != CODEC_ID_NONE) {
3903  for (i = 0; i < nb_input_streams; i++)
3904  if (input_streams[i].st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
3905  NEW_STREAM(subtitle, i);
3906  break;
3907  }
3908  }
3909  /* do something with data? */
3910  } else {
3911  for (i = 0; i < o->nb_stream_maps; i++) {
3912  StreamMap *map = &o->stream_maps[i];
3913 
3914  if (map->disabled)
3915  continue;
3916 
3917  ist = &input_streams[input_files[map->file_index].ist_index + map->stream_index];
3918  switch (ist->st->codec->codec_type) {
3919  case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(o, oc); break;
3920  case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc); break;
3921  case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream(o, oc); break;
3922  case AVMEDIA_TYPE_DATA: ost = new_data_stream(o, oc); break;
3923  case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc); break;
3924  default:
3925  av_log(NULL, AV_LOG_FATAL, "Cannot map stream #%d:%d - unsupported type.\n",
3926  map->file_index, map->stream_index);
3927  exit_program(1);
3928  }
3929 
3930  ost->source_index = input_files[map->file_index].ist_index + map->stream_index;
3931  ost->sync_ist = &input_streams[input_files[map->sync_file_index].ist_index +
3932  map->sync_stream_index];
3933  ist->discard = 0;
3934  }
3935  }
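 /* Illustrative mapping example (not part of the original source):
  *   avconv -i a.mkv -i b.mp3 -map 0:0 -map 1:0 out.mkv
  * selects the first stream of each input; every "-map" entry ends up in
  * o->stream_maps and is turned into an output stream by the loop above,
  * while automatic "best stream" selection is skipped. */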
3936 
3937  /* handle attached files */
3938  for (i = 0; i < o->nb_attachments; i++) {
3939  AVIOContext *pb;
3940  uint8_t *attachment;
3941  const char *p;
3942  int64_t len;
3943 
3944  if ((err = avio_open2(&pb, o->attachments[i], AVIO_FLAG_READ, &int_cb, NULL)) < 0) {
3945  av_log(NULL, AV_LOG_FATAL, "Could not open attachment file %s.\n",
3946  o->attachments[i]);
3947  exit_program(1);
3948  }
3949  if ((len = avio_size(pb)) <= 0) {
3950  av_log(NULL, AV_LOG_FATAL, "Could not get size of the attachment %s.\n",
3951  o->attachments[i]);
3952  exit_program(1);
3953  }
3954  if (!(attachment = av_malloc(len))) {
3955  av_log(NULL, AV_LOG_FATAL, "Attachment %s too large to fit into memory.\n",
3956  o->attachments[i]);
3957  exit_program(1);
3958  }
3959  avio_read(pb, attachment, len);
3960 
3961  ost = new_attachment_stream(o, oc);
3962  ost->stream_copy = 0;
3963  ost->source_index = -1;
3964  ost->attachment_filename = o->attachments[i];
3965  ost->st->codec->extradata = attachment;
3966  ost->st->codec->extradata_size = len;
3967 
3968  p = strrchr(o->attachments[i], '/');
3969  av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
3970  avio_close(pb);
3971  }
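 /* Illustrative usage (not part of the original source):
  *   avconv -i in.mkv -attach subtitles.ttf -c copy out.mkv
  * reads subtitles.ttf into memory here, stores it as codec extradata of a
  * new attachment stream, and records the file's basename in the stream's
  * "filename" metadata tag. */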
3972 
3973  output_files = grow_array(output_files, sizeof(*output_files), &nb_output_files, nb_output_files + 1);
3974  output_files[nb_output_files - 1].ctx = oc;
3975  output_files[nb_output_files - 1].ost_index = nb_output_streams - oc->nb_streams;
3976  output_files[nb_output_files - 1].recording_time = o->recording_time;
3977  output_files[nb_output_files - 1].start_time = o->start_time;
3978  output_files[nb_output_files - 1].limit_filesize = o->limit_filesize;
3979  av_dict_copy(&output_files[nb_output_files - 1].opts, format_opts, 0);
3980 
3981  /* check the filename in case an image number is expected */
3982  if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
3983  if (!av_filename_number_test(oc->filename)) {
3984  print_error(oc->filename, AVERROR(EINVAL));
3985  exit_program(1);
3986  }
3987  }
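 /* Example: an image2 output such as "frame-%03d.png" passes this test,
  * while a plain "frame.png" fails with EINVAL because the muxer needs a
  * frame-number placeholder in the filename. */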
3988 
3989  if (!(oc->oformat->flags & AVFMT_NOFILE)) {
3990  /* test if it already exists to avoid losing precious files */
3991  assert_file_overwrite(filename);
3992 
3993  /* open the file */
3994  if ((err = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE,
3995  &oc->interrupt_callback,
3996  &output_files[nb_output_files - 1].opts)) < 0) {
3997  print_error(filename, err);
3998  exit_program(1);
3999  }
4000  }
4001 
4002  if (o->mux_preload) {
4003  char buf[64];
4004  snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload * AV_TIME_BASE));
4005  av_dict_set(&output_files[nb_output_files - 1].opts, "preload", buf, 0);
4006  }
4007  oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
4008  oc->flags |= AVFMT_FLAG_NONBLOCK;
4009 
4010  /* copy metadata */
4011  for (i = 0; i < o->nb_metadata_map; i++) {
4012  char *p;
4013  int in_file_index = strtol(o->metadata_map[i].u.str, &p, 0);
4014 
4015  if (in_file_index < 0)
4016  continue;
4017  if (in_file_index >= nb_input_files) {
4018  av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d while processing metadata maps\n", in_file_index);
4019  exit_program(1);
4020  }
4021  copy_metadata(o->metadata_map[i].specifier, *p ? p + 1 : p, oc, input_files[in_file_index].ctx, o);
4022  }
4023 
4024  /* copy chapters */
4025  if (o->chapters_input_file >= nb_input_files) {
4026  if (o->chapters_input_file == INT_MAX) {
4027  /* copy chapters from the first input file that has them */
4028  o->chapters_input_file = -1;
4029  for (i = 0; i < nb_input_files; i++)
4030  if (input_files[i].ctx->nb_chapters) {
4031  o->chapters_input_file = i;
4032  break;
4033  }
4034  } else {
4035  av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d in chapter mapping.\n",
4036  o->chapters_input_file);
4037  exit_program(1);
4038  }
4039  }
4040  if (o->chapters_input_file >= 0)
4041  copy_chapters(&input_files[o->chapters_input_file], &output_files[nb_output_files - 1],
4042  !o->metadata_chapters_manual);
4043 
4044  /* copy global metadata by default */
4045  if (!o->metadata_global_manual && nb_input_files)
4046  av_dict_copy(&oc->metadata, input_files[0].ctx->metadata,
4047  AV_DICT_DONT_OVERWRITE);
4048  if (!o->metadata_streams_manual)
4049  for (i = output_files[nb_output_files - 1].ost_index; i < nb_output_streams; i++) {
4050  InputStream *ist;
4051  if (output_streams[i].source_index < 0) /* this is true e.g. for attached files */
4052  continue;
4053  ist = &input_streams[output_streams[i].source_index];
4054  av_dict_copy(&output_streams[i].st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
4055  }
4056 
4057  /* process manually set metadata */
4058  for (i = 0; i < o->nb_metadata; i++) {
4059  AVDictionary **m;
4060  char type, *val;
4061  const char *stream_spec;
4062  int index = 0, j, ret;
4063 
4064  val = strchr(o->metadata[i].u.str, '=');
4065  if (!val) {
4066  av_log(NULL, AV_LOG_FATAL, "No '=' character in metadata string %s.\n",
4067  o->metadata[i].u.str);
4068  exit_program(1);
4069  }
4070  *val++ = 0;
4071 
4072  parse_meta_type(o->metadata[i].specifier, &type, &index, &stream_spec);
4073  if (type == 's') {
4074  for (j = 0; j < oc->nb_streams; j++) {
4075  if ((ret = check_stream_specifier(oc, oc->streams[j], stream_spec)) > 0) {
4076  av_dict_set(&oc->streams[j]->metadata, o->metadata[i].u.str, *val ? val : NULL, 0);
4077  } else if (ret < 0)
4078  exit_program(1);
4079  }
4081  }
4082  else {
4083  switch (type) {
4084  case 'g':
4085  m = &oc->metadata;
4086  break;
4087  case 'c':
4088  if (index < 0 || index >= oc->nb_chapters) {
4089  av_log(NULL, AV_LOG_FATAL, "Invalid chapter index %d in metadata specifier.\n", index);
4090  exit_program(1);
4091  }
4092  m = &oc->chapters[index]->metadata;
4093  break;
4094  default:
4095  av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", o->metadata[i].specifier);
4096  exit_program(1);
4097  }
4098  av_dict_set(m, o->metadata[i].u.str, *val ? val : NULL, 0);
4099  }
4100  }
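 /* Illustrative examples (not part of the original source):
  *   -metadata title="My Movie"     sets global metadata (type 'g', the default)
  *   -metadata:s:0 language=eng     sets metadata on output stream 0 (type 's')
  *   -metadata:c:0 title="Intro"    sets metadata on chapter 0 (type 'c')
  * An empty value ("-metadata title=") deletes the tag, since NULL is passed
  * to av_dict_set() above. */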
4101 
4102  reset_options(o);
4103 }
4104 
4105 /* same option as mencoder */
4106 static int opt_pass(const char *opt, const char *arg)
4107 {
4108  do_pass = parse_number_or_die(opt, arg, OPT_INT, 1, 2);
4109  return 0;
4110 }
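 /* Illustrative two-pass invocation (not part of the original source):
  *   avconv -i in.avi -c:v mpeg4 -b:v 1000k -pass 1 -an -f rawvideo -y /dev/null
  *   avconv -i in.avi -c:v mpeg4 -b:v 1000k -pass 2 out.avi
  * The first run writes the rate-control log (see "-passlogfile" for the
  * prefix), the second run reads it back. */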
4111 
4112 static int64_t getutime(void)
4113 {
4114 #if HAVE_GETRUSAGE
4115  struct rusage rusage;
4116 
4117  getrusage(RUSAGE_SELF, &rusage);
4118  return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4119 #elif HAVE_GETPROCESSTIMES
4120  HANDLE proc;
4121  FILETIME c, e, k, u;
4122  proc = GetCurrentProcess();
4123  GetProcessTimes(proc, &c, &e, &k, &u);
4124  return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4125 #else
4126  return av_gettime();
4127 #endif
4128 }
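 /* Returns user CPU time in microseconds where the platform supports it
  * (getrusage() / GetProcessTimes()) and falls back to wall-clock time via
  * av_gettime() otherwise; only used for the "-benchmark" report in main(). */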
4129 
4130 static int64_t getmaxrss(void)
4131 {
4132 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4133  struct rusage rusage;
4134  getrusage(RUSAGE_SELF, &rusage);
4135  return (int64_t)rusage.ru_maxrss * 1024;
4136 #elif HAVE_GETPROCESSMEMORYINFO
4137  HANDLE proc;
4138  PROCESS_MEMORY_COUNTERS memcounters;
4139  proc = GetCurrentProcess();
4140  memcounters.cb = sizeof(memcounters);
4141  GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4142  return memcounters.PeakPagefileUsage;
4143 #else
4144  return 0;
4145 #endif
4146 }
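 /* Note: on Linux ru_maxrss is reported in kilobytes, hence the * 1024 to
  * return bytes; the Windows fallback returns PeakPagefileUsage, which is
  * already in bytes. Like getutime(), this only feeds the "-benchmark"
  * output. */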
4147 
4148 static int opt_audio_qscale(OptionsContext *o, const char *opt, const char *arg)
4149 {
4150  return parse_option(o, "q:a", arg, options);
4151 }
4152 
4153 static void show_usage(void)
4154 {
4155  printf("Hyper fast Audio and Video encoder\n");
4156  printf("usage: %s [options] [[infile options] -i infile]... {[outfile options] outfile}...\n", program_name);
4157  printf("\n");
4158 }
4159 
4160 static void show_help(void)
4161 {
4162  const int flags = AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_DECODING_PARAM;
4163  av_log_set_callback(log_callback_help);
4164  show_usage();
4165  show_help_options(options, "Main options:\n",
4166  OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB, 0);
4167  show_help_options(options, "\nAdvanced options:\n",
4168  OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB,
4169  OPT_EXPERT);
4170  show_help_options(options, "\nVideo options:\n",
4171  OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4172  OPT_VIDEO);
4173  show_help_options(options, "\nAdvanced Video options:\n",
4174  OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4175  OPT_VIDEO | OPT_EXPERT);
4176  show_help_options(options, "\nAudio options:\n",
4177  OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4178  OPT_AUDIO);
4179  show_help_options(options, "\nAdvanced Audio options:\n",
4180  OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4181  OPT_AUDIO | OPT_EXPERT);
4182  show_help_options(options, "\nSubtitle options:\n",
4183  OPT_SUBTITLE | OPT_GRAB,
4184  OPT_SUBTITLE);
4185  show_help_options(options, "\nAudio/Video grab options:\n",
4186  OPT_GRAB,
4187  OPT_GRAB);
4188  printf("\n");
4189  show_help_children(avcodec_get_class(), flags);
4190  show_help_children(avformat_get_class(), flags);
4191  show_help_children(sws_get_class(), flags);
4192 }
4193 
4194 static int opt_target(OptionsContext *o, const char *opt, const char *arg)
4195 {
4196  enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN;
4197  static const char *const frame_rates[] = { "25", "30000/1001", "24000/1001" };
4198 
4199  if (!strncmp(arg, "pal-", 4)) {
4200  norm = PAL;
4201  arg += 4;
4202  } else if (!strncmp(arg, "ntsc-", 5)) {
4203  norm = NTSC;
4204  arg += 5;
4205  } else if (!strncmp(arg, "film-", 5)) {
4206  norm = FILM;
4207  arg += 5;
4208  } else {
4209  /* Try to determine PAL/NTSC by peeking in the input files */
4210  if (nb_input_files) {
4211  int i, j, fr;
4212  for (j = 0; j < nb_input_files; j++) {
4213  for (i = 0; i < input_files[j].nb_streams; i++) {
4214  AVCodecContext *c = input_files[j].ctx->streams[i]->codec;
4215  if (c->codec_type != AVMEDIA_TYPE_VIDEO)
4216  continue;
4217  fr = c->time_base.den * 1000 / c->time_base.num;
4218  if (fr == 25000) {
4219  norm = PAL;
4220  break;
4221  } else if ((fr == 29970) || (fr == 23976)) {
4222  norm = NTSC;
4223  break;
4224  }
4225  }
4226  if (norm != UNKNOWN)
4227  break;
4228  }
4229  }
4230  if (norm != UNKNOWN)
4231  av_log(NULL, AV_LOG_INFO, "Assuming %s for target.\n", norm == PAL ? "PAL" : "NTSC");
4232  }
4233 
4234  if (norm == UNKNOWN) {
4235  av_log(NULL, AV_LOG_FATAL, "Could not determine norm (PAL/NTSC/NTSC-Film) for target.\n");
4236  av_log(NULL, AV_LOG_FATAL, "Please prefix target with \"pal-\", \"ntsc-\" or \"film-\",\n");
4237  av_log(NULL, AV_LOG_FATAL, "or set a framerate with \"-r xxx\".\n");
4238  exit_program(1);
4239  }
4240 
4241  if (!strcmp(arg, "vcd")) {
4242  opt_video_codec(o, "c:v", "mpeg1video");
4243  opt_audio_codec(o, "c:a", "mp2");
4244  parse_option(o, "f", "vcd", options);
4245 
4246  parse_option(o, "s", norm == PAL ? "352x288" : "352x240", options);
4247  parse_option(o, "r", frame_rates[norm], options);
4248  opt_default("g", norm == PAL ? "15" : "18");
4249 
4250  opt_default("b", "1150000");
4251  opt_default("maxrate", "1150000");
4252  opt_default("minrate", "1150000");
4253  opt_default("bufsize", "327680"); // 40*1024*8;
4254 
4255  opt_default("b:a", "224000");
4256  parse_option(o, "ar", "44100", options);
4257  parse_option(o, "ac", "2", options);
4258 
4259  opt_default("packetsize", "2324");
4260  opt_default("muxrate", "1411200"); // 2352 * 75 * 8;
4261 
4262  /* We have to offset the PTS so that it is consistent with the SCR.
4263  The SCR starts at 36000, but the first two packs contain only padding,
4264  and the first pack from the other stream may also have been
4265  written before the real data.
4266  So the real data starts at SCR 36000 + 3*1200. */
4267  o->mux_preload = (36000 + 3 * 1200) / 90000.0; // 0.44
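 /* i.e. mux_preload = (36000 + 3 * 1200) / 90000.0 = 39600 / 90000 = 0.44 s,
  * where 90000 is the MPEG 90 kHz system clock. */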
4268  } else if (!strcmp(arg, "svcd")) {
4269 
4270  opt_video_codec(o, "c:v", "mpeg2video");
4271  opt_audio_codec(o, "c:a", "mp2");
4272  parse_option(o, "f", "svcd", options);
4273 
4274  parse_option(o, "s", norm == PAL ? "480x576" : "480x480", options);
4275  parse_option(o, "r", frame_rates[norm], options);
4276  opt_default("g", norm == PAL ? "15" : "18");
4277 
4278  opt_default("b", "2040000");
4279  opt_default("maxrate", "2516000");
4280  opt_default("minrate", "0"); // 1145000;
4281  opt_default("bufsize", "1835008"); // 224*1024*8;
4282  opt_default("flags", "+scan_offset");
4283 
4284 
4285  opt_default("b:a", "224000");
4286  parse_option(o, "ar", "44100", options);
4287 
4288  opt_default("packetsize", "2324");
4289 
4290  } else if (!strcmp(arg, "dvd")) {
4291 
4292  opt_video_codec(o, "c:v", "mpeg2video");
4293  opt_audio_codec(o, "c:a", "ac3");
4294  parse_option(o, "f", "dvd", options);
4295 
4296  parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
4297  parse_option(o, "r", frame_rates[norm], options);
4298  opt_default("g", norm == PAL ? "15" : "18");
4299 
4300  opt_default("b", "6000000");
4301  opt_default("maxrate", "9000000");
4302  opt_default("minrate", "0"); // 1500000;
4303  opt_default("bufsize", "1835008"); // 224*1024*8;
4304 
4305  opt_default("packetsize", "2048"); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
4306  opt_default("muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
4307 
4308  opt_default("b:a", "448000");
4309  parse_option(o, "ar", "48000", options);
4310 
4311  } else if (!strncmp(arg, "dv", 2)) {
4312 
4313  parse_option(o, "f", "dv", options);
4314 
4315  parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
4316  parse_option(o, "pix_fmt", !strncmp(arg, "dv50", 4) ? "yuv422p" :
4317  norm == PAL ? "yuv420p" : "yuv411p", options);
4318  parse_option(o, "r", frame_rates[norm], options);
4319 
4320  parse_option(o, "ar", "48000", options);
4321  parse_option(o, "ac", "2", options);
4322 
4323  } else {
4324  av_log(NULL, AV_LOG_ERROR, "Unknown target: %s\n", arg);
4325  return AVERROR(EINVAL);
4326  }
4327  return 0;
4328 }
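 /* Illustrative usage (not part of the original source):
  *   avconv -i input.avi -target pal-dvd output.mpg
  * expands to the mpeg2video/ac3 codecs, 720x576, 25 fps and the DVD mux
  * settings configured in the "dvd" branch above. */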
4329 
4330 static int opt_vstats_file(const char *opt, const char *arg)
4331 {
4332  av_free (vstats_filename);
4333  vstats_filename = av_strdup (arg);
4334  return 0;
4335 }
4336 
4337 static int opt_vstats(const char *opt, const char *arg)
4338 {
4339  char filename[40];
4340  time_t today2 = time(NULL);
4341  struct tm *today = localtime(&today2);
4342 
4343  snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min,
4344  today->tm_sec);
4345  return opt_vstats_file(opt, filename);
4346 }
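 /* e.g. running with "-vstats" at 14:30:05 creates "vstats_143005.log" in
  * the current directory; "-vstats_file name.log" picks the name explicitly. */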
4347 
4348 static int opt_video_frames(OptionsContext *o, const char *opt, const char *arg)
4349 {
4350  return parse_option(o, "frames:v", arg, options);
4351 }
4352 
4353 static int opt_audio_frames(OptionsContext *o, const char *opt, const char *arg)
4354 {
4355  return parse_option(o, "frames:a", arg, options);
4356 }
4357 
4358 static int opt_data_frames(OptionsContext *o, const char *opt, const char *arg)
4359 {
4360  return parse_option(o, "frames:d", arg, options);
4361 }
4362 
4363 static int opt_video_tag(OptionsContext *o, const char *opt, const char *arg)
4364 {
4365  return parse_option(o, "tag:v", arg, options);
4366 }
4367 
4368 static int opt_audio_tag(OptionsContext *o, const char *opt, const char *arg)
4369 {
4370  return parse_option(o, "tag:a", arg, options);
4371 }
4372 
4373 static int opt_subtitle_tag(OptionsContext *o, const char *opt, const char *arg)
4374 {
4375  return parse_option(o, "tag:s", arg, options);
4376 }
4377 
4378 static int opt_video_filters(OptionsContext *o, const char *opt, const char *arg)
4379 {
4380  return parse_option(o, "filter:v", arg, options);
4381 }
4382 
4383 static int opt_vsync(const char *opt, const char *arg)
4384 {
4385  if (!av_strcasecmp(arg, "cfr")) video_sync_method = VSYNC_CFR;
4386  else if (!av_strcasecmp(arg, "vfr")) video_sync_method = VSYNC_VFR;
4387  else if (!av_strcasecmp(arg, "passthrough")) video_sync_method = VSYNC_PASSTHROUGH;
4388 
4389  if (video_sync_method == VSYNC_AUTO)
4390  video_sync_method = parse_number_or_die("vsync", arg, OPT_INT, VSYNC_AUTO, VSYNC_VFR);
4391  return 0;
4392 }
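 /* Illustrative usage: "-vsync cfr" duplicates or drops frames to hold a
  * constant frame rate, "-vsync vfr" passes frames through with their
  * timestamps (dropping duplicates), and "-vsync passthrough" forwards them
  * untouched; a bare number is also accepted for backwards compatibility. */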
4393 
4394 #define OFFSET(x) offsetof(OptionsContext, x)
4395 static const OptionDef options[] = {
4396  /* main options */
4397 #include "cmdutils_common_opts.h"
4398  { "f", HAS_ARG | OPT_STRING | OPT_OFFSET, {.off = OFFSET(format)}, "force format", "fmt" },
4399  { "i", HAS_ARG | OPT_FUNC2, {(void*)opt_input_file}, "input file name", "filename" },
4400  { "y", OPT_BOOL, {(void*)&file_overwrite}, "overwrite output files" },
4401  { "c", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4402  { "codec", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4403  { "pre", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(presets)}, "preset name", "preset" },
4404  { "map", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map}, "set input stream mapping", "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" },
4405  { "map_metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata_map)}, "set metadata information of outfile from infile",
4406  "outfile[,metadata]:infile[,metadata]" },
4407  { "map_chapters", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(chapters_input_file)}, "set chapters mapping", "input_file_index" },
4408  { "t", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(recording_time)}, "record or transcode \"duration\" seconds of audio/video", "duration" },
4409  { "fs", HAS_ARG | OPT_INT64 | OPT_OFFSET, {.off = OFFSET(limit_filesize)}, "set the limit file size in bytes", "limit_size" }, //
4410  { "ss", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(start_time)}, "set the start time offset", "time_off" },
4411  { "itsoffset", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(input_ts_offset)}, "set the input ts offset", "time_off" },
4412  { "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(ts_scale)}, "set the input ts scale", "scale" },
4413  { "metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata)}, "add metadata", "string=string" },
4414  { "dframes", HAS_ARG | OPT_FUNC2, {(void*)opt_data_frames}, "set the number of data frames to record", "number" },
4415  { "benchmark", OPT_BOOL | OPT_EXPERT, {(void*)&do_benchmark},
4416  "add timings for benchmarking" },
4417  { "timelimit", HAS_ARG, {(void*)opt_timelimit}, "set max runtime in seconds", "limit" },
4418  { "dump", OPT_BOOL | OPT_EXPERT, {(void*)&do_pkt_dump},
4419  "dump each input packet" },
4420  { "hex", OPT_BOOL | OPT_EXPERT, {(void*)&do_hex_dump},
4421  "when dumping packets, also dump the payload" },
4422  { "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(rate_emu)}, "read input at native frame rate", "" },
4423  { "target", HAS_ARG | OPT_FUNC2, {(void*)opt_target}, "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" },
4424  { "vsync", HAS_ARG | OPT_EXPERT, {(void*)opt_vsync}, "video sync method", "" },
4425  { "async", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&audio_sync_method}, "audio sync method", "" },
4426  { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&audio_drift_threshold}, "audio drift threshold", "threshold" },
4427  { "copyts", OPT_BOOL | OPT_EXPERT, {(void*)&copy_ts}, "copy timestamps" },
4428  { "copytb", OPT_BOOL | OPT_EXPERT, {(void*)&copy_tb}, "copy input stream time base when stream copying" },
4429  { "shortest", OPT_BOOL | OPT_EXPERT, {(void*)&opt_shortest}, "finish encoding within shortest input" }, //
4430  { "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&dts_delta_threshold}, "timestamp discontinuity delta threshold", "threshold" },
4431  { "xerror", OPT_BOOL, {(void*)&exit_on_error}, "exit on error", "error" },
4432  { "copyinkf", OPT_BOOL | OPT_EXPERT | OPT_SPEC, {.off = OFFSET(copy_initial_nonkeyframes)}, "copy initial non-keyframes" },
4433  { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC, {.off = OFFSET(max_frames)}, "set the number of frames to record", "number" },
4434  { "tag", OPT_STRING | HAS_ARG | OPT_SPEC, {.off = OFFSET(codec_tags)}, "force codec tag/fourcc", "fourcc/tag" },
4435  { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4436  { "qscale", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4437 #if CONFIG_AVFILTER
4438  { "filter", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(filters)}, "set stream filterchain", "filter_list" },
4439 #endif
4440  { "stats", OPT_BOOL, {&print_stats}, "print progress report during encoding", },
4441  { "attach", HAS_ARG | OPT_FUNC2, {(void*)opt_attach}, "add an attachment to the output file", "filename" },
4442  { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(dump_attachment)}, "extract an attachment into a file", "filename" },
4443 
4444  /* video options */
4445  { "vframes", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_frames}, "set the number of video frames to record", "number" },
4446  { "r", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_rates)}, "set frame rate (Hz value, fraction or abbreviation)", "rate" },
4447  { "s", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_sizes)}, "set frame size (WxH or abbreviation)", "size" },
4448  { "aspect", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_aspect_ratios)}, "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" },
4449  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_pix_fmts)}, "set pixel format", "format" },
4450  { "vn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET, {.off = OFFSET(video_disable)}, "disable video" },
4451  { "vdt", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&video_discard}, "discard threshold", "n" },
4452  { "rc_override", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(rc_overrides)}, "rate control override for specific intervals", "override" },
4453  { "vcodec", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_codec}, "force video codec ('copy' to copy stream)", "codec" },
4454  { "same_quant", OPT_BOOL | OPT_VIDEO, {(void*)&same_quant},
4455  "use same quantizer as source (implies VBR)" },
4456  { "pass", HAS_ARG | OPT_VIDEO, {(void*)opt_pass}, "select the pass number (1 or 2)", "n" },
4457  { "passlogfile", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void*)&pass_logfilename_prefix}, "select two pass log file name prefix", "prefix" },
4458  { "deinterlace", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_deinterlace},
4459  "deinterlace pictures" },
4460  { "vstats", OPT_EXPERT | OPT_VIDEO, {(void*)&opt_vstats}, "dump video coding statistics to file" },
4461  { "vstats_file", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_vstats_file}, "dump video coding statistics to file", "file" },
4462 #if CONFIG_AVFILTER
4463  { "vf", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_filters}, "video filters", "filter list" },
4464 #endif
4465  { "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(intra_matrices)}, "specify intra matrix coeffs", "matrix" },
4466  { "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(inter_matrices)}, "specify inter matrix coeffs", "matrix" },
4467  { "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_INT| OPT_SPEC, {.off = OFFSET(top_field_first)}, "top=1/bottom=0/auto=-1 field first", "" },
4468  { "dc", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_dc_precision}, "intra_dc_precision", "precision" },
4469  { "vtag", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_tag}, "force video tag/fourcc", "fourcc/tag" },
4470  { "qphist", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, { (void *)&qp_hist }, "show QP histogram" },
4471  { "force_fps", OPT_BOOL | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(force_fps)}, "force the selected framerate, disable the best supported framerate selection" },
4472  { "streamid", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_streamid}, "set the value of an outfile streamid", "streamIndex:value" },
4473  { "force_key_frames", OPT_STRING | HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(forced_key_frames)}, "force key frames at specified timestamps", "timestamps" },
4474 
4475  /* audio options */
4476  { "aframes", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_frames}, "set the number of audio frames to record", "number" },
4477  { "aq", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_qscale}, "set audio quality (codec-specific)", "quality", },
4478  { "ar", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_sample_rate)}, "set audio sampling rate (in Hz)", "rate" },
4479  { "ac", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_channels)}, "set number of audio channels", "channels" },
4480  { "an", OPT_BOOL | OPT_AUDIO | OPT_OFFSET, {.off = OFFSET(audio_disable)}, "disable audio" },
4481  { "acodec", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_codec}, "force audio codec ('copy' to copy stream)", "codec" },
4482  { "atag", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_tag}, "force audio tag/fourcc", "fourcc/tag" },
4483  { "vol", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&audio_volume}, "change audio volume (256=normal)" , "volume" }, //
4484  { "sample_fmt", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_SPEC | OPT_STRING, {.off = OFFSET(sample_fmts)}, "set sample format", "format" },
4485 
4486  /* subtitle options */
4487  { "sn", OPT_BOOL | OPT_SUBTITLE | OPT_OFFSET, {.off = OFFSET(subtitle_disable)}, "disable subtitle" },
4488  { "scodec", HAS_ARG | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_codec}, "force subtitle codec ('copy' to copy stream)", "codec" },
4489  { "stag", HAS_ARG | OPT_EXPERT | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_tag}, "force subtitle tag/fourcc", "fourcc/tag" },
4490 
4491  /* grab options */
4492  { "isync", OPT_BOOL | OPT_EXPERT | OPT_GRAB, {(void*)&input_sync}, "sync read on input", "" },
4493 
4494  /* muxer options */
4495  { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_max_delay)}, "set the maximum demux-decode delay", "seconds" },
4496  { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_preload)}, "set the initial demux-decode delay", "seconds" },
4497 
4498  { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(bitstream_filters)}, "A comma-separated list of bitstream filters", "bitstream_filters" },
4499 
4500  /* data codec support */
4501  { "dcodec", HAS_ARG | OPT_DATA | OPT_FUNC2, {(void*)opt_data_codec}, "force data codec ('copy' to copy stream)", "codec" },
4502 
4503  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
4504  { NULL, },
4505 };
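 /* Note on the table above: OPT_SPEC options accept per-stream specifiers
  * (e.g. "-c:v mpeg4" or "-filter:v") and are stored as arrays of
  * SpecifierOpt, OPT_OFFSET options are written into the OptionsContext and
  * apply to the next input/output file on the command line, and OPT_FUNC2
  * entries dispatch to the opt_*() helpers defined earlier in this file. */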
4506 
4507 int main(int argc, char **argv)
4508 {
4509  OptionsContext o = { 0 };
4510  int64_t ti;
4511 
4512  reset_options(&o);
4513 
4514  av_log_set_flags(AV_LOG_SKIP_REPEATED);
4515  parse_loglevel(argc, argv, options);
4516 
4517  avcodec_register_all();
4518 #if CONFIG_AVDEVICE
4519  avdevice_register_all();
4520 #endif
4521 #if CONFIG_AVFILTER
4522  avfilter_register_all();
4523 #endif
4524  av_register_all();
4525  avformat_network_init();
4526 
4527  show_banner();
4528 
4529  /* parse options */
4530  parse_options(&o, argc, argv, options, opt_output_file);
4531 
4532  if (nb_output_files <= 0 && nb_input_files == 0) {
4533  show_usage();
4534  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4535  exit_program(1);
4536  }
4537 
4538  /* file converter / grab */
4539  if (nb_output_files <= 0) {
4540  fprintf(stderr, "At least one output file must be specified\n");
4541  exit_program(1);
4542  }
4543 
4544  if (nb_input_files == 0) {
4545  av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4546  exit_program(1);
4547  }
4548 
4549  ti = getutime();
4550  if (transcode(output_files, nb_output_files, input_files, nb_input_files) < 0)
4551  exit_program(1);
4552  ti = getutime() - ti;
4553  if (do_benchmark) {
4554  int maxrss = getmaxrss() / 1024;
4555  printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);
4556  }
4557 
4558  exit_program(0);
4559  return 0;
4560 }