#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>

#define STREAM_DURATION   10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

#define SCALE_FLAGS SWS_BICUBIC

/* a wrapper around a single output AVStream */
typedef struct OutputStream {
    AVStream *st;
    AVCodecContext *enc;

    /* pts of the next frame that will be generated */
    int64_t next_pts;
    int samples_count;

    AVFrame *frame;
    AVFrame *tmp_frame;

    AVPacket *tmp_pkt;

    float t, tincr, tincr2;

    struct SwsContext *sws_ctx;
    struct SwrContext *swr_ctx;
} OutputStream;
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

    printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}
static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
                       AVStream *st, AVFrame *frame, AVPacket *pkt)
{
    int ret;

    /* send the frame to the encoder */
    ret = avcodec_send_frame(c, frame);
    if (ret < 0) {
        fprintf(stderr, "Error sending a frame to the encoder: %s\n",
                av_err2str(ret));
        exit(1);
    }

    while (ret >= 0) {
        ret = avcodec_receive_packet(c, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break;
        else if (ret < 0) {
            fprintf(stderr, "Error encoding a frame: %s\n", av_err2str(ret));
            exit(1);
        }

        /* rescale output packet timestamp values from codec to stream timebase */
        av_packet_rescale_ts(pkt, c->time_base, st->time_base);
        pkt->stream_index = st->index;

        /* Write the compressed frame to the media file. */
        log_packet(fmt_ctx, pkt);
        ret = av_interleaved_write_frame(fmt_ctx, pkt);
        /* pkt is now blank (av_interleaved_write_frame() takes ownership of
         * its contents and resets pkt), so no unreferencing is necessary. */
        if (ret < 0) {
            fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret));
            exit(1);
        }
    }

    return ret == AVERROR_EOF ? 1 : 0;
}
/* Add an output stream. */
static void add_stream(OutputStream *ost, AVFormatContext *oc,
                       const AVCodec **codec, enum AVCodecID codec_id)
{
    AVCodecContext *c;
    int i;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }

    ost->tmp_pkt = av_packet_alloc();
    if (!ost->tmp_pkt) {
        fprintf(stderr, "Could not allocate AVPacket\n");
        exit(1);
    }

    ost->st = avformat_new_stream(oc, NULL);
    if (!ost->st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    ost->st->id = oc->nb_streams - 1;

    c = avcodec_alloc_context3(*codec);
    if (!c) {
        fprintf(stderr, "Could not alloc an encoding context\n");
        exit(1);
    }
    ost->enc = c;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        c->sample_fmt  = (*codec)->sample_fmts ?
            (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
        c->bit_rate    = 64000;
        c->sample_rate = 44100;
        if ((*codec)->supported_samplerates) {
            c->sample_rate = (*codec)->supported_samplerates[0];
            for (i = 0; (*codec)->supported_samplerates[i]; i++) {
                if ((*codec)->supported_samplerates[i] == 44100)
                    c->sample_rate = 44100;
            }
        }
        c->channel_layout = AV_CH_LAYOUT_STEREO;
        if ((*codec)->channel_layouts) {
            c->channel_layout = (*codec)->channel_layouts[0];
            for (i = 0; (*codec)->channel_layouts[i]; i++) {
                if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
                    c->channel_layout = AV_CH_LAYOUT_STEREO;
            }
        }
        c->channels        = av_get_channel_layout_nb_channels(c->channel_layout);
        ost->st->time_base = (AVRational){ 1, c->sample_rate };
        break;

    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;
        c->bit_rate = 400000;
        /* Resolution must be a multiple of two. */
        c->width    = 352;
        c->height   = 288;
        /* timebase: this is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
        c->time_base       = ost->st->time_base;
        c->gop_size        = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt         = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B-frames */
            c->max_b_frames = 2;
        }
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                  uint64_t channel_layout,
                                  int sample_rate, int nb_samples)
{
    AVFrame *frame = av_frame_alloc();
    int ret;

    if (!frame) {
        fprintf(stderr, "Error allocating an audio frame\n");
        exit(1);
    }

    frame->format         = sample_fmt;
    frame->channel_layout = channel_layout;
    frame->sample_rate    = sample_rate;
    frame->nb_samples     = nb_samples;

    if (nb_samples) {
        ret = av_frame_get_buffer(frame, 0);
        if (ret < 0) {
            fprintf(stderr, "Error allocating an audio buffer\n");
            exit(1);
        }
    }

    return frame;
}
static void open_audio(AVFormatContext *oc, const AVCodec *codec,
                       OutputStream *ost, AVDictionary *opt_arg)
{
    AVCodecContext *c = ost->enc;
    int nb_samples;
    int ret;
    AVDictionary *opt = NULL;

    /* open the codec */
    av_dict_copy(&opt, opt_arg, 0);
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* init the sine-wave signal generator */
    ost->t      = 0;
    ost->tincr  = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
        nb_samples = 10000;
    else
        nb_samples = c->frame_size;

    ost->frame     = alloc_audio_frame(c->sample_fmt, c->channel_layout,
                                       c->sample_rate, nb_samples);
    ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
                                       c->sample_rate, nb_samples);

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }

    /* create the resampler context */
    ost->swr_ctx = swr_alloc();
    if (!ost->swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        exit(1);
    }

    /* set options */
    av_opt_set_int       (ost->swr_ctx, "in_channel_count",  c->channels,       0);
    av_opt_set_int       (ost->swr_ctx, "in_sample_rate",    c->sample_rate,    0);
    av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt",     AV_SAMPLE_FMT_S16, 0);
    av_opt_set_int       (ost->swr_ctx, "out_channel_count", c->channels,       0);
    av_opt_set_int       (ost->swr_ctx, "out_sample_rate",   c->sample_rate,    0);
    av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt",    c->sample_fmt,     0);

    /* initialize the resampling context */
    if ((ret = swr_init(ost->swr_ctx)) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        exit(1);
    }
}
/* Prepare a 16-bit dummy audio frame. */
static AVFrame *get_audio_frame(OutputStream *ost)
{
    AVFrame *frame = ost->tmp_frame;
    int j, i, v;
    int16_t *q = (int16_t*)frame->data[0];

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, ost->enc->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
        return NULL;

    for (j = 0; j < frame->nb_samples; j++) {
        v = (int)(sin(ost->t) * 10000);
        for (i = 0; i < ost->enc->channels; i++)
            *q++ = v;
        ost->t     += ost->tincr;
        ost->tincr += ost->tincr2;
    }

    frame->pts = ost->next_pts;
    ost->next_pts += frame->nb_samples;

    return frame;
}
/* Encode one audio frame and send it to the muxer.
 * Return 1 when encoding is finished, 0 otherwise. */
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c = ost->enc;
    AVFrame *frame = get_audio_frame(ost);
    int ret;
    int dst_nb_samples;

    if (frame) {
        /* compute the destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
                                        c->sample_rate, c->sample_rate, AV_ROUND_UP);
        av_assert0(dst_nb_samples == frame->nb_samples);

        /* the encoder may keep a reference to the frame internally;
         * make sure we do not overwrite it here */
        ret = av_frame_make_writable(ost->frame);
        if (ret < 0)
            exit(1);

        /* convert the samples to the destination format */
        ret = swr_convert(ost->swr_ctx,
                          ost->frame->data, dst_nb_samples,
                          (const uint8_t **)frame->data, frame->nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            exit(1);
        }
        frame = ost->frame;

        frame->pts = av_rescale_q(ost->samples_count, (AVRational){ 1, c->sample_rate }, c->time_base);
        ost->samples_count += dst_nb_samples;
    }

    return write_frame(oc, c, ost->st, frame, ost->tmp_pkt);
}
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture = av_frame_alloc();
    int ret;

    if (!picture)
        return NULL;

    picture->format = pix_fmt;
    picture->width  = width;
    picture->height = height;

    /* allocate the buffers for the frame data */
    ret = av_frame_get_buffer(picture, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        exit(1);
    }

    return picture;
}
static void open_video(AVFormatContext *oc, const AVCodec *codec,
                       OutputStream *ost, AVDictionary *opt_arg)
{
    int ret;
    AVCodecContext *c = ost->enc;
    AVDictionary *opt = NULL;

    /* open the codec */
    av_dict_copy(&opt, opt_arg, 0);
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* allocate and init a re-usable frame */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too; it is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}
/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index,
                           int width, int height)
{
    int x, y, i;

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}
static AVFrame *get_video_frame(OutputStream *ost)
{
    AVCodecContext *c = ost->enc;

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, c->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
        return NULL;

    /* the encoder may keep a reference to the frame internally;
     * make sure we do not overwrite it here */
    if (av_frame_make_writable(ost->frame) < 0)
        exit(1);

    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        /* as we only generate a YUV420P picture, convert it
         * to the codec pixel format if needed */
        if (!ost->sws_ctx) {
            ost->sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
                                          c->width, c->height, c->pix_fmt,
                                          SCALE_FLAGS, NULL, NULL, NULL);
            if (!ost->sws_ctx) {
                fprintf(stderr,
                        "Could not initialize the conversion context\n");
                exit(1);
            }
        }
        fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
        sws_scale(ost->sws_ctx, (const uint8_t * const *) ost->tmp_frame->data,
                  ost->tmp_frame->linesize, 0, c->height,
                  ost->frame->data, ost->frame->linesize);
    } else {
        fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
    }

    ost->frame->pts = ost->next_pts++;

    return ost->frame;
}
/* Encode one video frame and send it to the muxer.
 * Return 1 when encoding is finished, 0 otherwise. */
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
    return write_frame(oc, ost->enc, ost->st, get_video_frame(ost), ost->tmp_pkt);
}
static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
    avcodec_free_context(&ost->enc);
    av_frame_free(&ost->frame);
    av_frame_free(&ost->tmp_frame);
    av_packet_free(&ost->tmp_pkt);
    sws_freeContext(ost->sws_ctx);
    swr_free(&ost->swr_ctx);
}
int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const AVOutputFormat *fmt;
    const char *filename;
    AVFormatContext *oc;
    const AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;
    int i;

    if (argc < 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];
    for (i = 2; i + 1 < argc; i += 2) {
        if (!strcmp(argv[i], "-flags") || !strcmp(argv[i], "-fflags"))
            av_dict_set(&opt, argv[i] + 1, argv[i + 1], 0);
    }

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);
    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, &opt);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
            (!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
                                            audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !write_audio_frame(oc, &audio_st);
        }
    }

    /* Write the trailer, if any. */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_closep(&oc->pb);

    /* free the output context */
    avformat_free_context(oc);

    return 0;
}
@ AV_SAMPLE_FMT_FLTP
float, planar
AVPixelFormat
Pixel format.
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
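The write_frame() helper above follows the standard send/receive pattern. Reduced to its core, it looks like the following minimal sketch (the encode_one() helper name is hypothetical, and the packet is consumed generically here rather than muxed):

    /* Minimal sketch of the encode loop: feed one frame, then drain
     * every packet the encoder is ready to emit. */
    static void encode_one(AVCodecContext *enc, AVFrame *frame, AVPacket *pkt)
    {
        int ret = avcodec_send_frame(enc, frame);   /* frame == NULL enters draining mode */
        if (ret < 0)
            exit(1);
        while ((ret = avcodec_receive_packet(enc, pkt)) >= 0) {
            /* ... mux or otherwise consume pkt here ... */
            av_packet_unref(pkt);
        }
        /* AVERROR(EAGAIN): encoder needs more input; AVERROR_EOF: fully flushed */
        if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            exit(1);
    }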
AVStream * avformat_new_stream(AVFormatContext *s, const AVCodec *c)
Add a new stream to a media file.
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
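In main() this comparison decides whether the video or the audio stream is encoded next. As a small worked example (values chosen purely for illustration):

    /* 4 seconds of video (pts 100 at time base 1/25) compared against
     * 4 seconds of audio (pts 176400 at time base 1/44100): result is 0,
     * i.e. both streams have reached the same point in time. */
    int cmp = av_compare_ts(100,    (AVRational){ 1, 25 },
                            176400, (AVRational){ 1, 44100 });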
static AVFrame * alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
#define AVERROR_EOF
End of file.
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extradata.
const AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
AVFrame
This structure describes decoded (raw) audio or video data.
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
AVStream ** streams
A list of all streams in the file.
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
static void open_audio(AVFormatContext *oc, const AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
void av_packet_free(AVPacket **pkt)
Free the packet; if the packet is reference counted, it will be unreferenced first.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
#define AV_CODEC_FLAG_GLOBAL_HEADER
Place global headers in extradata instead of every keyframe.
static AVFrame * get_video_frame(OutputStream *ost)
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
@ AV_ROUND_UP
Round toward +infinity.
#define AV_CH_LAYOUT_STEREO
static void open_video(AVFormatContext *oc, const AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
int64_t swr_get_delay(struct SwrContext *s, int64_t base)
Gets the delay the next input sample will experience relative to the next output sample.
int main(int argc, char **argv)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
struct SwrContext * swr_ctx
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
static AVFrame * alloc_audio_frame(enum AVSampleFormat sample_fmt, uint64_t channel_layout, int sample_rate, int nb_samples)
static AVFrame * get_audio_frame(OutputStream *ost)
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
av_cold struct SwrContext * swr_alloc(void)
Allocate SwrContext.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define AVIO_FLAG_WRITE
write-only
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
static enum AVPixelFormat pix_fmt
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
SwrContext
The libswresample context.
av_warn_unused_result int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
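For orientation, the muxing-level calls in this example happen in the following order (a condensed sketch with error handling omitted; `oc` and `filename` are as in main()):

    /* 1. open the output, unless the format carries its own I/O */
    if (!(oc->oformat->flags & AVFMT_NOFILE))
        avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
    /* 2. write the container header */
    avformat_write_header(oc, NULL);
    /* 3. write interleaved packets: av_interleaved_write_frame(oc, pkt) ... */
    /* 4. finish the file and close it */
    av_write_trailer(oc);
    if (!(oc->oformat->flags & AVFMT_NOFILE))
        avio_closep(&oc->pb);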
AVCodecParameters * codecpar
Codec parameters associated with this stream.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
static AVFormatContext * fmt_ctx
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
AVRational
Rational number (pair of numerator and denominator).
AVIOContext * pb
I/O context.
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
struct SwsContext * sws_ctx
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
AVCodecID
Identify the syntax and semantics of the bitstream.
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding rnd)
Rescale a 64-bit integer with specified rounding.
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
struct SwsContext * sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Allocate and return an SwsContext.
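get_video_frame() uses this together with sws_scale() when the encoder does not accept YUV420P directly. A minimal sketch, assuming hypothetical `src`/`dst` frames of size `w` x `h` and a target pixel format `enc_pix_fmt`:

    /* Convert a YUV420P source frame into the encoder's pixel format. */
    struct SwsContext *sws = sws_getContext(w, h, AV_PIX_FMT_YUV420P,
                                            w, h, enc_pix_fmt,
                                            SCALE_FLAGS, NULL, NULL, NULL);
    sws_scale(sws, (const uint8_t * const *) src->data, src->linesize,
              0, h, dst->data, dst->linesize);
    sws_freeContext(sws);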
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t **out_arg, int out_count, const uint8_t **in_arg, int in_count)
Convert audio.
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames,...
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
void av_packet_rescale_ts(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another.
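write_frame() calls this to move packet timestamps from the codec time base to the stream time base before muxing. A small worked example (illustrative values, not taken from the example):

    /* With a codec time base of 1/25 and a stream time base of 1/90000
     * (typical for MPEG), a packet with pts 100 is rewritten to pts 360000. */
    pkt->pts = pkt->dts = 100;
    pkt->duration = 1;
    av_packet_rescale_ts(pkt, (AVRational){ 1, 25 }, (AVRational){ 1, 90000 });
    /* now pkt->pts == 360000 and pkt->duration == 3600 */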
int channels
number of audio channels
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
AVSampleFormat
Audio sample formats.
@ AV_SAMPLE_FMT_S16
signed 16 bits
static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c, AVStream *st, AVFrame *frame, AVPacket *pkt)
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
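Passing NULL as the frame puts the encoder into draining mode; in this example that happens implicitly once get_video_frame()/get_audio_frame() return NULL after STREAM_DURATION. Spelled out, a flush looks roughly like this (a sketch, with `c`, `pkt`, `st` and `oc` as in write_frame()):

    /* flush the encoder: send a NULL frame, then drain until no packets remain */
    avcodec_send_frame(c, NULL);
    while (avcodec_receive_packet(c, pkt) >= 0) {
        av_packet_rescale_ts(pkt, c->time_base, st->time_base);
        pkt->stream_index = st->index;
        av_interleaved_write_frame(oc, pkt);
    }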
int id
Format-specific stream ID.
const struct AVOutputFormat * oformat
The output container format.
AVCodecContext
main external API structure.
int index
stream index in AVFormatContext
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
int avio_open(AVIOContext **s, const char *url, int flags)
Create and initialize a AVIOContext for accessing the resource indicated by url.
static void close_stream(AVFormatContext *oc, OutputStream *ost)
AVPacket
This structure stores compressed data.
#define STREAM_FRAME_RATE
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
static void add_stream(OutputStream *ost, AVFormatContext *oc, const AVCodec **codec, enum AVCodecID codec_id)
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
int av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags)
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding