#define FF_INTERNAL_FIELDS 1

#define NB_ITEMS(list) (list ## _size / sizeof(*list))
#define FIFO_INIT_SIZE 8
#define FIFO_INIT_ELEMENT_SIZE sizeof(void *)
buf->peeked_frame = in;
if (buf->peeked_frame)
    /* ... */

} else if (inlink->frame_wanted_out) {
params->pixel_fmts = pixel_fmts;

buf->warning_limit = 100;
if (buf->warning_limit &&
    ff_framequeue_queued_frames(&ctx->inputs[0]->fifo) >= buf->warning_limit) {
    av_log(ctx, AV_LOG_WARNING,
           "%d buffers queued in %s, something may be wrong.\n",
           buf->warning_limit,
           (char *)av_x_if_null(ctx->name, ctx->filter->name));
    buf->warning_limit *= 10;
}
#define MAKE_AVFILTERLINK_ACCESSOR(type, field) \
type av_buffersink_get_##field(const AVFilterContext *ctx) { \
    av_assert0(ctx->filter->activate == activate); \
    return ctx->inputs[0]->field; \
}
#define CHECK_LIST_SIZE(field) \
    if (buf->field ## _size % sizeof(*buf->field)) { \
        av_log(ctx, AV_LOG_ERROR, "Invalid size for " #field ": %d, " \
               "should be multiple of %d\n", \
               buf->field ## _size, (int)sizeof(*buf->field)); \
        return AVERROR(EINVAL); \
    }
if (buf->pixel_fmts_size) {

if (buf->sample_fmts_size) {

if (buf->channel_layouts_size || buf->channel_counts_size ||
    buf->all_channel_counts) {

if (buf->all_channel_counts) {
    /* ... */
    av_log(ctx, AV_LOG_WARNING,
           "Conflicting all_channel_counts and list in options\n");

if (buf->sample_rates_size) {
#define OFFSET(x) offsetof(BufferSinkContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
/* ... */
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM
.name        = "buffersink",
.description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
.priv_class  = &buffersink_class,
.name        = "abuffersink",
.description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
.priv_class  = &abuffersink_class,
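The OFFSET and FLAGS helpers in the listing above are the usual FFmpeg idiom for declaring AVOption tables over a private context (here BufferSinkContext). A minimal sketch of an entry built on them (the option name and help string are illustrative, not copied from the source):

static const AVOption buffersink_options_example[] = {
    /* An AV_OPT_TYPE_BINARY option stores a pointer plus a byte length into
       the context, i.e. exactly the pixel_fmts / pixel_fmts_size pair that
       NB_ITEMS() and CHECK_LIST_SIZE() operate on. */
    { "pix_fmts", "set the supported pixel formats", OFFSET(pixel_fmts),
      AV_OPT_TYPE_BINARY, .flags = FLAGS },
    { NULL },
};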
static const AVFilterPad avfilter_vsink_buffer_inputs[]
A list of supported channel layouts.
#define AV_LOG_WARNING
Something somehow does not look correct.
int attribute_align_arg av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples)
Same as av_buffersink_get_frame(), but with the ability to specify the number of samples read.
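As a usage sketch for av_buffersink_get_samples(): the caller can drain the sink in fixed-size chunks, which is convenient when feeding an encoder that expects a constant number of samples per frame. abuffersink_ctx is assumed to be an already configured abuffersink instance and process_audio() is a hypothetical consumer.

AVFrame *frame = av_frame_alloc();
int ret;

/* Pull 1024-sample chunks until the sink has no more complete chunks. */
while ((ret = av_buffersink_get_samples(abuffersink_ctx, frame, 1024)) >= 0) {
    process_audio(frame);   /* hypothetical consumer of the filtered audio */
    av_frame_unref(frame);
}
if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
    fprintf(stderr, "Error while reading from abuffersink\n");
av_frame_free(&frame);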
AVPixelFormat
Pixel format.
These fields are for internal use only; they must not be accessed directly. The fifo field contains the frames queued in the input for processing by the filter; the status_in and status_out fields contain the queued status (EOF or error) of the link.
static const AVOption abuffersink_options[]
Filter glossary and format negotiation: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. For each input and each output, a filter lists the supported formats; for video that means pixel format, for audio that means channel layout, sample format and sample rate. The lists are not just lists, they are references to shared objects. When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires its input and output to have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later; that can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. See also: frame references, ownership and permissions.
enum AVSampleFormat * sample_fmts
list of accepted sample formats, terminated by AV_SAMPLE_FMT_NONE
Struct to use for initializing an abuffersink context.
static enum AVSampleFormat sample_fmts[]
enum MovChannelLayoutTag * layouts
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
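A typical way to use av_opt_set_int_list() with this filter, mirroring the pattern in FFmpeg's filtering examples (buffersink_ctx is assumed to be a buffersink instance created but not yet part of a configured graph):

static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
int ret;

/* Store the AV_PIX_FMT_NONE-terminated list into the binary "pix_fmts" option. */
ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
                          AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
if (ret < 0)
    fprintf(stderr, "Cannot set output pixel format\n");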
The exact code depends on how similar the blocks are and how they relate to each other; the filter needs to apply these operations to the correct inlink or outlink if there are several. Macros are available to factor that out when no extra processing is needed.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
This structure describes decoded (raw) audio or video data.
int ff_filter_graph_run_once(AVFilterGraph *graph)
Run one round of processing on a filter graph.
const char * name
Filter name.
A link between two filters.
#define CHECK_LIST_SIZE(field)
int64_t * channel_layouts
list of accepted channel layouts, terminated by -1
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link's FIFO and update the link's stats.
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
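A common use of av_buffersink_set_frame_size(), mirroring FFmpeg's transcoding examples (abuffersink_ctx and enc_ctx are assumed to be a configured audio sink and an opened audio encoder context):

/* If the encoder cannot handle a variable number of samples per frame,
   ask the sink to hand out frames of exactly enc_ctx->frame_size samples. */
if (!(enc_ctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
    av_buffersink_set_frame_size(abuffersink_ctx, enc_ctx->frame_size);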
@ AV_OPT_TYPE_BINARY
offset must point to a pointer immediately followed by an int for the length
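A sketch of the field layout AV_OPT_TYPE_BINARY relies on (the struct name is made up; it mirrors the pixel_fmts / pixel_fmts_size pairs that this file checks with CHECK_LIST_SIZE()):

typedef struct ExampleSinkContext {
    enum AVPixelFormat *pixel_fmts;      /* the option's offset points at this pointer */
    int                 pixel_fmts_size; /* the int right after it receives the byte length */
} ExampleSinkContext;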
It is the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this field; just let it be.
enum AVPixelFormat * pixel_fmts
list of accepted pixel formats, must be terminated with -1
A filter pad used for either input or output.
#define AV_BUFFERSINK_FLAG_PEEK
Tell av_buffersink_get_buffer_ref() to read the video/samples buffer reference, but not remove it from the buffer.
void ff_inlink_request_frame(AVFilterLink *link)
Mark that a frame is wanted on the link.
Filter glossary: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. For each input and each output, a filter lists the supported formats; for video that means pixel format, for audio that means channel layout, sample format (the sample packing is implied by the sample format) and sample rate. The lists are not just lists...
static int return_or_keep_frame(BufferSinkContext *buf, AVFrame *out, AVFrame *in, int flags)
int attribute_align_arg av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
Get a frame with filtered data from sink and put it in frame.
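A minimal consumer loop on the sink end of a configured graph, in the style of FFmpeg's filtering examples (buffersink_ctx is assumed to be the buffersink instance; display_frame() is a hypothetical consumer):

AVFrame *filt_frame = av_frame_alloc();
int ret;

while (1) {
    ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        break;                        /* need more input, or the stream ended */
    if (ret < 0) {
        fprintf(stderr, "Error while pulling a frame from the buffersink\n");
        break;
    }
    display_frame(filt_frame);        /* hypothetical consumer */
    av_frame_unref(filt_frame);
}
av_frame_free(&filt_frame);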
AVFILTER_DEFINE_CLASS(buffersink)
static const AVFilterPad outputs[]
AVBufferSinkParams * av_buffersink_params_alloc(void)
Create an AVBufferSinkParams structure.
static int activate(AVFilterContext *ctx)
Describe the class of an AVClass context structure.
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
static av_cold int vsink_init(AVFilterContext *ctx, void *opaque)
Rational number (pair of numerator and denominator).
AVFilter ff_asink_abuffer
static int vsink_query_formats(AVFilterContext *ctx)
These buffered frames must be flushed immediately if a new input produces new output. The filter must not call request_frame to get more; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs...
int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
Test and acknowledge the change of status on the link.
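Together with ff_inlink_consume_frame()/ff_inlink_consume_samples() and ff_inlink_request_frame(), this is the pattern get_frame_internal() is built on. A loose paraphrase of that loop (assuming the local variables of that function; not the verbatim source):

for (;;) {
    ret = samples ? ff_inlink_consume_samples(inlink, samples, samples, &cur_frame)
                  : ff_inlink_consume_frame(inlink, &cur_frame);
    if (ret < 0)
        return ret;                                  /* real error */
    if (ret > 0)                                     /* got a frame */
        return return_or_keep_frame(buf, frame, cur_frame, flags);
    if (ff_inlink_acknowledge_status(inlink, &status, &pts))
        return status;                               /* EOF or error queued on the link */
    if (flags & AV_BUFFERSINK_FLAG_NO_REQUEST)
        return AVERROR(EAGAIN);                      /* caller asked not to drive the graph */
    if (inlink->frame_wanted_out) {
        ret = ff_filter_graph_run_once(ctx->graph);  /* let the graph make progress */
        if (ret < 0)
            return ret;
    } else {
        ff_inlink_request_frame(inlink);             /* ask upstream for more data */
    }
}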
static av_cold int asink_init(AVFilterContext *ctx, void *opaque)
static const AVOption buffersink_options[]
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
static const AVFilterPad avfilter_asink_abuffer_inputs[]
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
AVABufferSinkParams * av_abuffersink_params_alloc(void)
Create an AVABufferSinkParams structure.
static av_cold int common_init(AVFilterContext *ctx)
#define i(width, name, range_min, range_max)
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
AVSampleFormat
Audio sample formats.
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
const char * name
Pad name.
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
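In practice this flag turns av_buffersink_get_frame_flags() into a non-blocking poll: it only looks at what is already queued in the sink instead of driving the graph. A small sketch (buffersink_ctx and frame assumed to be set up as in the loop above):

ret = av_buffersink_get_frame_flags(buffersink_ctx, frame,
                                    AV_BUFFERSINK_FLAG_NO_REQUEST);
if (ret == AVERROR(EAGAIN)) {
    /* nothing buffered in the sink right now; feed more input and retry */
}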
These buffered frames must be flushed immediately if a new input produces new output. The filter must not call request_frame to get more; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input, so any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. It should return 0, or at least make progress towards producing a frame.
const char * params
Struct to use for initializing a buffersink context.
static size_t ff_framequeue_queued_frames(const FFFrameQueue *fq)
Get the number of queued frames.
static const uint8_t channel_counts[7]
int * channel_counts
list of accepted channel counts, terminated by -1
Filter glossary: the word "frame" indicates either a video frame or a group of audio samples.
static int get_frame_internal(AVFilterContext *ctx, AVFrame *frame, int flags, int samples)
A reference to a data buffer.
#define MAKE_AVFILTERLINK_ACCESSOR(type, field)
static int asink_query_formats(AVFilterContext *ctx)
static const uint16_t channel_layouts[7]
static int query_formats(AVFilterContext *ctx)
#define flags(name, subs,...)
int * sample_rates
list of accepted sample rates, terminated by -1
static void * av_x_if_null(const void *p, const void *x)
Return the default pointer x in case p is NULL.