Libav
output.c
/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * libavformat API example.
 *
 * @example output.c
 * Output a media file in any supported libavformat format. The default
 * codecs are used.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include "libavutil/channel_layout.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavformat/avformat.h"
#include "libavresample/avresample.h"
#include "libswscale/swscale.h"

/* 5 seconds stream duration */
#define STREAM_DURATION   5.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

#define SCALE_FLAGS SWS_BICUBIC

// a wrapper around a single output AVStream
typedef struct OutputStream {
    AVStream *st;
    AVCodecContext *enc;

    /* pts of the next frame that will be generated */
    int64_t next_pts;

    AVFrame *frame;     /* frame holding data in the encoder's native format */
    AVFrame *tmp_frame; /* frame holding the generated source data */

    float t, tincr, tincr2;

    struct SwsContext *sws_ctx;  /* pixel format conversion for video */
    AVAudioResampleContext *avr; /* sample format conversion for audio */
} OutputStream;

/**************************************************************/
/* audio output */

/*
 * add an audio output stream
 */
static void add_audio_stream(OutputStream *ost, AVFormatContext *oc,
                             enum AVCodecID codec_id)
{
    AVCodecContext *c;
    AVCodec *codec;
    int ret;

    /* find the audio encoder */
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    ost->st = avformat_new_stream(oc, NULL);
    if (!ost->st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not alloc an encoding context\n");
        exit(1);
    }
    ost->enc = c;

    /* put sample parameters */
    c->sample_fmt     = codec->sample_fmts           ? codec->sample_fmts[0]           : AV_SAMPLE_FMT_S16;
    c->sample_rate    = codec->supported_samplerates ? codec->supported_samplerates[0] : 44100;
    c->channel_layout = codec->channel_layouts       ? codec->channel_layouts[0]       : AV_CH_LAYOUT_STEREO;
    c->channels       = av_get_channel_layout_nb_channels(c->channel_layout);
    c->bit_rate       = 64000;

    ost->st->time_base = (AVRational){ 1, c->sample_rate };

    // some formats want stream headers to be separate
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    /* initialize sample format conversion;
     * to simplify the code, we always pass the data through lavr, even
     * if the encoder supports the generated format directly -- the price is
     * some extra data copying;
     */
    ost->avr = avresample_alloc_context();
    if (!ost->avr) {
        fprintf(stderr, "Error allocating the resampling context\n");
        exit(1);
    }

    av_opt_set_int(ost->avr, "in_sample_fmt",      AV_SAMPLE_FMT_S16,   0);
    av_opt_set_int(ost->avr, "in_sample_rate",     44100,               0);
    av_opt_set_int(ost->avr, "in_channel_layout",  AV_CH_LAYOUT_STEREO, 0);
    av_opt_set_int(ost->avr, "out_sample_fmt",     c->sample_fmt,       0);
    av_opt_set_int(ost->avr, "out_sample_rate",    c->sample_rate,      0);
    av_opt_set_int(ost->avr, "out_channel_layout", c->channel_layout,   0);

    ret = avresample_open(ost->avr);
    if (ret < 0) {
        fprintf(stderr, "Error opening the resampling context\n");
        exit(1);
    }
}

static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                  uint64_t channel_layout,
                                  int sample_rate, int nb_samples)
{
    AVFrame *frame = av_frame_alloc();
    int ret;

    if (!frame) {
        fprintf(stderr, "Error allocating an audio frame\n");
        exit(1);
    }

    frame->format         = sample_fmt;
    frame->channel_layout = channel_layout;
    frame->sample_rate    = sample_rate;
    frame->nb_samples     = nb_samples;

    if (nb_samples) {
        ret = av_frame_get_buffer(frame, 0);
        if (ret < 0) {
            fprintf(stderr, "Error allocating an audio buffer\n");
            exit(1);
        }
    }

    return frame;
}

static void open_audio(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;
    int nb_samples, ret;

    c = ost->enc;

    /* open it */
    if (avcodec_open2(c, NULL, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* init signal generator */
    ost->t      = 0;
    ost->tincr  = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
        nb_samples = 10000;
    else
        nb_samples = c->frame_size;

    ost->frame     = alloc_audio_frame(c->sample_fmt, c->channel_layout,
                                       c->sample_rate, nb_samples);
    ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, AV_CH_LAYOUT_STEREO,
                                       44100, nb_samples);

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}

/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
 * 'nb_channels' channels. */
static AVFrame *get_audio_frame(OutputStream *ost)
{
    AVFrame *frame = ost->tmp_frame;
    int j, i, v;
    int16_t *q = (int16_t*)frame->data[0];

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, ost->enc->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;

    for (j = 0; j < frame->nb_samples; j++) {
        v = (int)(sin(ost->t) * 10000);
        for (i = 0; i < ost->enc->channels; i++)
            *q++ = v;
        ost->t     += ost->tincr;
        ost->tincr += ost->tincr2;
    }

    return frame;
}

/* if a frame is provided, send it to the encoder, otherwise flush the encoder;
 * return 1 when encoding is finished, 0 otherwise
 */
static int encode_audio_frame(AVFormatContext *oc, OutputStream *ost,
                              AVFrame *frame)
{
    AVPacket pkt = { 0 }; // data and size must be 0;
    int got_packet;

    av_init_packet(&pkt);
    avcodec_encode_audio2(ost->enc, &pkt, frame, &got_packet);

    if (got_packet) {
        pkt.stream_index = ost->st->index;

        av_packet_rescale_ts(&pkt, ost->enc->time_base, ost->st->time_base);

        /* Write the compressed frame to the media file. */
        if (av_interleaved_write_frame(oc, &pkt) != 0) {
            fprintf(stderr, "Error while writing audio frame\n");
            exit(1);
        }
    }

    return (frame || got_packet) ? 0 : 1;
}

/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int process_audio_stream(AVFormatContext *oc, OutputStream *ost)
{
    AVFrame *frame;
    int got_output = 0;
    int ret;

    frame = get_audio_frame(ost);
    got_output |= !!frame;

    /* feed the data to lavr */
    if (frame) {
        ret = avresample_convert(ost->avr, NULL, 0, 0,
                                 frame->extended_data, frame->linesize[0],
                                 frame->nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error feeding audio data to the resampler\n");
            exit(1);
        }
    }

    while ((frame && avresample_available(ost->avr) >= ost->frame->nb_samples) ||
           (!frame && avresample_get_out_samples(ost->avr, 0))) {
        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
         * make sure we do not overwrite it here
         */
        ret = av_frame_make_writable(ost->frame);
        if (ret < 0)
            exit(1);

        /* the difference between the two avresample calls here is that the
         * first one just reads the already converted data that is buffered in
         * the lavr output buffer, while the second one also flushes the
         * resampler */
        if (frame) {
            ret = avresample_read(ost->avr, ost->frame->extended_data,
                                  ost->frame->nb_samples);
        } else {
            ret = avresample_convert(ost->avr, ost->frame->extended_data,
                                     ost->frame->linesize[0], ost->frame->nb_samples,
                                     NULL, 0, 0);
        }

        if (ret < 0) {
            fprintf(stderr, "Error while resampling\n");
            exit(1);
        } else if (frame && ret != ost->frame->nb_samples) {
            fprintf(stderr, "Too few samples returned from lavr\n");
            exit(1);
        }

        ost->frame->nb_samples = ret;

        ost->frame->pts        = ost->next_pts;
        ost->next_pts         += ost->frame->nb_samples;

        got_output |= encode_audio_frame(oc, ost, ret ? ost->frame : NULL);
    }

    return !got_output;
}

/**************************************************************/
/* video output */

/* Add a video output stream. */
static void add_video_stream(OutputStream *ost, AVFormatContext *oc,
                             enum AVCodecID codec_id)
{
    AVCodecContext *c;
    AVCodec *codec;

    /* find the video encoder */
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    ost->st = avformat_new_stream(oc, NULL);
    if (!ost->st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not alloc an encoding context\n");
        exit(1);
    }
    ost->enc = c;

    /* Put sample parameters. */
    c->bit_rate = 400000;
    /* Resolution must be a multiple of two. */
    c->width  = 352;
    c->height = 288;
    /* timebase: This is the fundamental unit of time (in seconds) in terms
     * of which frame timestamps are represented. For fixed-fps content,
     * timebase should be 1/framerate and timestamp increments should be
     * identical to 1. */
    ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
    c->time_base       = ost->st->time_base;

    c->gop_size = 12; /* emit one intra frame every twelve frames at most */
    c->pix_fmt  = STREAM_PIX_FMT;
    if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        /* just for testing, we also add B-frames */
        c->max_b_frames = 2;
    }
    if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
        /* Needed to avoid using macroblocks in which some coeffs overflow.
         * This does not happen with normal video, it just happens here as
         * the motion of the chroma plane does not match the luma plane. */
        c->mb_decision = 2;
    }
    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}

static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    int ret;

    picture = av_frame_alloc();
    if (!picture)
        return NULL;

    picture->format = pix_fmt;
    picture->width  = width;
    picture->height = height;

    /* allocate the buffers for the frame data */
    ret = av_frame_get_buffer(picture, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        exit(1);
    }

    return picture;
}

static void open_video(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;
    int ret;

    c = ost->enc;

    /* open the codec */
    if (avcodec_open2(c, NULL, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* Allocate the encoded raw picture. */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}

/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index,
                           int width, int height)
{
    int x, y, i, ret;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally;
     * make sure we do not overwrite it here
     */
    ret = av_frame_make_writable(pict);
    if (ret < 0)
        exit(1);

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}

static AVFrame *get_video_frame(OutputStream *ost)
{
    AVCodecContext *c = ost->enc;

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, c->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
        return NULL;

    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        /* as we only generate a YUV420P picture, we must convert it
         * to the codec pixel format if needed */
        if (!ost->sws_ctx) {
            ost->sws_ctx = sws_getContext(c->width, c->height,
                                          AV_PIX_FMT_YUV420P,
                                          c->width, c->height,
                                          c->pix_fmt,
                                          SCALE_FLAGS, NULL, NULL, NULL);
            if (!ost->sws_ctx) {
                fprintf(stderr,
                        "Cannot initialize the conversion context\n");
                exit(1);
            }
        }
        fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
        sws_scale(ost->sws_ctx, ost->tmp_frame->data, ost->tmp_frame->linesize,
                  0, c->height, ost->frame->data, ost->frame->linesize);
    } else {
        fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
    }

    ost->frame->pts = ost->next_pts++;

    return ost->frame;
}

/*
 * encode one video frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
    int ret;
    AVCodecContext *c;
    AVFrame *frame;
    AVPacket pkt = { 0 };
    int got_packet = 0;

    c = ost->enc;

    frame = get_video_frame(ost);

    av_init_packet(&pkt);

    /* encode the image */
    ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding a video frame\n");
        exit(1);
    }

    if (got_packet) {
        av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
        pkt.stream_index = ost->st->index;

        /* Write the compressed frame to the media file. */
        ret = av_interleaved_write_frame(oc, &pkt);
    }

    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }

    return (frame || got_packet) ? 0 : 1;
}

static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
    avcodec_free_context(&ost->enc);
    av_frame_free(&ost->frame);
    av_frame_free(&ost->tmp_frame);
    sws_freeContext(ost->sws_ctx);
    avresample_free(&ost->avr);
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* Autodetect the output format from the name. default is MPEG. */
    fmt = av_guess_format(NULL, filename, NULL);
    if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        fmt = av_guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        return 1;
    }

    /* Allocate the output media context. */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        return 1;
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_video_stream(&video_st, oc, fmt->video_codec);
        have_video   = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_audio_stream(&audio_st, oc, fmt->audio_codec);
        have_audio   = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, &video_st);
    if (have_audio)
        open_audio(oc, &audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 1;
        }
    }

    /* Write the stream header, if any. */
    avformat_write_header(oc, NULL);

    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
            (!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
                                            audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !process_audio_stream(oc, &audio_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
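
/*
 * Build sketch, assuming the Libav development packages provide pkg-config
 * files named libavformat, libavcodec, libavresample, libswscale and
 * libavutil (names and paths are an assumption, not part of this example):
 *
 *     gcc -o output output.c \
 *         $(pkg-config --cflags --libs libavformat libavcodec \
 *                      libavresample libswscale libavutil) -lm
 *
 * Running "./output test.mpg" would then write a 5 second test clip in the
 * container format guessed from the file extension.
 */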