#ifndef AVFILTER_FRAMESYNC_H
#define AVFILTER_FRAMESYNC_H

#define FRAMESYNC_DEFINE_CLASS(name, context, field) \
static int name##_framesync_preinit(AVFilterContext *ctx) { \
    context *s = ctx->priv; \
    ff_framesync_preinit(&s->field); \
    return 0; \
} \
static const AVClass *name##_child_class_next(const AVClass *prev) { \
    return prev ? NULL : framesync_get_class(); \
} \
static void *name##_child_next(void *obj, void *prev) { \
    context *s = obj; \
    s->fs.class = framesync_get_class(); \
    return prev ? NULL : &s->field; \
} \
static const AVClass name##_class = { \
    .class_name       = #name, \
    .item_name        = av_default_item_name, \
    .option           = name##_options, \
    .version          = LIBAVUTIL_VERSION_INT, \
    .category         = AV_CLASS_CATEGORY_FILTER, \
    .child_class_next = name##_child_class_next, \
    .child_next       = name##_child_next, \
}
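A minimal sketch of how a filter might use FRAMESYNC_DEFINE_CLASS; the filter name "mix", the MixContext structure and mix_options[] are hypothetical, only the macro and the FFFrameSync field come from this API.

#include "libavutil/opt.h"
#include "avfilter.h"
#include "framesync.h"

/* Hypothetical filter context; the generated child_next accesses s->fs,
 * so the FFFrameSync member should be named fs. */
typedef struct MixContext {
    const AVClass *class;
    FFFrameSync fs;
} MixContext;

static const AVOption mix_options[] = {
    { NULL }
};

/* Expands to mix_framesync_preinit(), mix_child_class_next(),
 * mix_child_next() and the AVClass mix_class. */
FRAMESYNC_DEFINE_CLASS(mix, MixContext, fs);

The filter would then typically reference mix_framesync_preinit from its preinit callback and &mix_class as its priv_class, so that the framesync options appear as child options of the filter.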
int ff_framesync_init_dualinput(FFFrameSync *fs, AVFilterContext *parent)
Initialize a frame sync structure for dualinput.
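A minimal sketch of how a two-input filter might call this from its output configuration; PairContext and do_blend are hypothetical names.

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    PairContext *s = ctx->priv;    /* hypothetical context holding an FFFrameSync fs */
    int ret;

    if ((ret = ff_framesync_init_dualinput(&s->fs, ctx)) < 0)
        return ret;
    s->fs.on_event = do_blend;     /* hypothetical frame event callback */
    return ff_framesync_configure(&s->fs);
}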
AVFrame
This structure describes decoded (raw) audio or video data.
int(* on_event)(struct FFFrameSync *fs)
Callback called when a frame event is ready.
int64_t pts
Timestamp of the current event.
enum FFFrameSyncExtMode before
Extrapolation mode for timestamps before the first frame.
int ff_framesync_activate(FFFrameSync *fs)
Examine the frames in the filter's input and try to produce output.
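A sketch of the usual pattern, assuming a MixContext with an FFFrameSync field named fs: the filter's activate callback can simply delegate to ff_framesync_activate.

static int activate(AVFilterContext *ctx)
{
    MixContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}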
AVFilterContext * parent
Parent filter context.
uint8_t have_next
Boolean flagging the next frame, for internal use.
unsigned sync_level
Synchronization level: only inputs with the same sync level are sync sources.
FFFrameSyncIn * in
Pointer to array of inputs.
enum FFFrameSyncExtMode after
Extrapolation mode for timestamps after the last frame.
AVFrame * frame_next
Next frame, for internal use.
int ff_framesync_dualinput_get_writable(FFFrameSync *fs, AVFrame **f0, AVFrame **f1)
Same as ff_framesync_dualinput_get(), but make sure that f0 is writable.
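A possible frame event callback for a two-input filter, sketched under the assumption of a hypothetical blend_pixels() helper; f1 may be NULL when the secondary input provides no frame.

static int do_blend(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFrame *main_frame, *second_frame;
    int ret;

    ret = ff_framesync_dualinput_get_writable(fs, &main_frame, &second_frame);
    if (ret < 0)
        return ret;
    if (second_frame)
        blend_pixels(main_frame, second_frame);   /* hypothetical processing */
    return ff_filter_frame(ctx->outputs[0], main_frame);
}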
AVRational time_base
Time base for the incoming frames.
uint8_t eof
Flag indicating that output has reached EOF.
unsigned in_request
Index of the input that requires a request.
int ff_framesync_init(FFFrameSync *fs, AVFilterContext *parent, unsigned nb_in)
Initialize a frame sync structure.
void ff_framesync_preinit(FFFrameSync *fs)
Pre-initialize a frame sync structure.
FFFrameSyncExtMode
This API is intended as a helper for filters that have several video inputs and need to combine them somehow.
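A minimal sketch of setting up the sync for a filter with several video inputs, assuming a MixContext and a process_frame callback (both hypothetical); the per-input fields are the documented FFFrameSyncIn members.

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s = ctx->priv;
    unsigned i;
    int ret;

    if ((ret = ff_framesync_init(&s->fs, ctx, ctx->nb_inputs)) < 0)
        return ret;
    for (i = 0; i < ctx->nb_inputs; i++) {
        FFFrameSyncIn *in = &s->fs.in[i];
        in->time_base = ctx->inputs[i]->time_base;
        in->sync      = 1;            /* every input is a sync source */
        in->before    = EXT_STOP;     /* no output before the first frame */
        in->after     = EXT_INFINITY; /* keep extending the last frame at EOF */
    }
    s->fs.opaque   = s;
    s->fs.on_event = process_frame;
    return ff_framesync_configure(&s->fs);
}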
AVRational time_base
Time base for the output events.
int ff_framesync_get_frame(FFFrameSync *fs, unsigned in, AVFrame **rframe, unsigned get)
Get the current frame in an input.
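A sketch of a frame event callback that reads the current frame of every input with get=0, so the frames remain owned by the framesync structure; ff_get_video_buffer and av_rescale_q are regular libavfilter/libavutil helpers, mix_input() is hypothetical.

static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFilterLink *outlink = ctx->outputs[0];
    MixContext *s = fs->opaque;
    AVFrame *out, *in;
    unsigned i;
    int ret;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    out->pts = av_rescale_q(fs->pts, fs->time_base, outlink->time_base);

    for (i = 0; i < fs->nb_in; i++) {
        if ((ret = ff_framesync_get_frame(fs, i, &in, 0)) < 0) {
            av_frame_free(&out);
            return ret;
        }
        if (in)                         /* may be NULL before the first frame or after EOF */
            mix_input(s, out, in, i);   /* hypothetical per-input processing */
    }
    return ff_filter_frame(outlink, out);
}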
int ff_framesync_configure(FFFrameSync *fs)
Configure a frame sync structure.
void * opaque
Opaque pointer, not used by the API.
EXT_INFINITY
Extend the frame to infinity.
int ff_framesync_dualinput_get(FFFrameSync *fs, AVFrame **f0, AVFrame **f1)
uint8_t state
State: before first, in stream or after EOF, for internal use.
unsigned sync
Synchronization level: frames on input at the highest sync level will generate output frame events.
AVClass
Describe the class of an AVClass context structure.
AVRational
Rational number (pair of numerator and denominator).
EXT_NULL
Ignore this stream and continue processing the other ones.
unsigned nb_in
Number of input streams.
AVFrame * frame
Current frame, may be NULL before the first one or after EOF.
uint8_t frame_ready
Flag indicating that a frame event is ready.
void ff_framesync_uninit(FFFrameSync *fs)
Free all memory currently allocated.
const AVClass * framesync_get_class(void)
Get the class for the framesync object.
int64_t pts
PTS of the current frame.
EXT_STOP
Completely stop all streams with this one.
int64_t pts_next
PTS of the next frame, for internal use.