The rendered source listing survives here only in fragments. What can be recovered shows the two conversion directions this file implements, both switching on frame->format.

ff_proc_from_dnn_to_frame() writes a float DNN output back into an AVFrame. For packed AV_PIX_FMT_RGB24 / AV_PIX_FMT_BGR24 it builds an SwsContext that converts a frame->width * 3 wide AV_PIX_FMT_GRAYF32 image to AV_PIX_FMT_GRAY8 and calls sws_scale() with a single-plane source stride of frame->width * 3 * sizeof(float); for single-component formats the conversion is frame->width wide with a stride of frame->width * sizeof(float); when the frame already matches the DNN buffer layout, the plane is copied directly with av_image_copy_plane() using the bytewidth from av_image_get_linesize() and frame->height.

The frame-to-DNN helpers, proc_from_frame_to_dnn_frameprocessing() and proc_from_frame_to_dnn_analytics(), mirror this in the opposite direction with the same width * 3 * sizeof(float) and width * sizeof(float) strides. A failed SwsContext allocation is reported with av_log(log_ctx, AV_LOG_ERROR, "Impossible to create scale context for the conversion fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n", ...), and a failing av_image_fill_linesizes() call with "unable to get linesizes with av_image_fill_linesizes".
AVPixelFormat
Pixel format.
filter_frame: For filters that do not use the activate() callback, this method is called when a frame is pushed to the filter's input. It can be called at any time except in a reentrant way. If the input frame is enough to produce output, the filter should push the output frames on the output link immediately.
AVFrame
This structure describes decoded (raw) audio or video data.
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
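A typical sws_scale() call pairs with sws_getContext() and sws_freeContext(); the hypothetical helper below (not taken from this file) converts a decoded frame to RGB24 at the same size. The destination frame is assumed to be already allocated with matching dimensions.

    #include <stdint.h>
    #include <libswscale/swscale.h>
    #include <libavutil/frame.h>
    #include <libavutil/error.h>

    static int convert_to_rgb24(const AVFrame *src, AVFrame *dst)
    {
        struct SwsContext *ctx = sws_getContext(src->width, src->height, src->format,
                                                src->width, src->height, AV_PIX_FMT_RGB24,
                                                SWS_FAST_BILINEAR, NULL, NULL, NULL);
        if (!ctx)
            return AVERROR(EINVAL);
        sws_scale(ctx, (const uint8_t * const *)src->data, src->linesize,
                  0, src->height, dst->data, dst->linesize);
        sws_freeContext(ctx);
        return 0;
    }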
static DNNReturnType proc_from_frame_to_dnn_analytics(AVFrame *frame, DNNData *input, void *log_ctx)
void av_image_copy_plane(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize, int bytewidth, int height)
Copy image plane from src to dst.
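av_image_copy_plane() is the simple row-by-row copy used when no pixel format conversion is needed. A hypothetical helper (not from this file) that packs plane 0 of a frame into a contiguous buffer could look like this:

    #include <stdint.h>
    #include <libavutil/frame.h>
    #include <libavutil/imgutils.h>

    /* dst is assumed to hold bytewidth * frame->height bytes with no padding. */
    static int copy_first_plane(uint8_t *dst, const AVFrame *frame)
    {
        int bytewidth = av_image_get_linesize(frame->format, frame->width, 0);
        if (bytewidth < 0)
            return bytewidth;
        av_image_copy_plane(dst, bytewidth,                      /* packed destination     */
                            frame->data[0], frame->linesize[0],  /* possibly padded source */
                            bytewidth, frame->height);
        return 0;
    }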
#define SWS_FAST_BILINEAR
DNNReturnType ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
static enum AVPixelFormat get_pixel_format(DNNData *data)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
Fill plane linesizes for an image with pixel format pix_fmt and width width.
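A short hypothetical snippet (not from this file) showing what av_image_fill_linesizes() reports, reusing the error message the listing logs when the call fails:

    #include <libavutil/imgutils.h>
    #include <libavutil/log.h>

    static void show_linesizes(void)
    {
        int linesizes[4];
        if (av_image_fill_linesizes(linesizes, AV_PIX_FMT_YUV420P, 1280) < 0) {
            av_log(NULL, AV_LOG_ERROR, "unable to get linesizes with av_image_fill_linesizes\n");
            return;
        }
        /* For 1280-wide YUV420P: linesizes == { 1280, 640, 640, 0 }. */
    }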
#define av_assert0(cond)
assert() equivalent, that is always enabled.
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define AV_PIX_FMT_GRAYF32
@ AV_PIX_FMT_GRAY8
Y, 8bpp.
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
struct SwsContext * sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Allocate and return an SwsContext.
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2, 3)
Log a generic warning message about a missing feature.
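avpriv_report_missing_feature() is only available inside the FFmpeg libraries (it is declared in libavutil/internal.h). A hypothetical in-tree use, reporting an unsupported pixel format, might look like this:

    #include "libavutil/internal.h"
    #include "libavutil/error.h"
    #include "libavutil/pixdesc.h"

    /* Hypothetical check: only packed RGB/BGR is accepted here. */
    static int check_supported(void *log_ctx, enum AVPixelFormat fmt)
    {
        if (fmt != AV_PIX_FMT_RGB24 && fmt != AV_PIX_FMT_BGR24) {
            avpriv_report_missing_feature(log_ctx, "pixel format %s", av_get_pix_fmt_name(fmt));
            return AVERROR(ENOSYS);
        }
        return 0;
    }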
Test the status of the outputs and forward it to the corresponding inputs; return FFERROR_NOT_READY if no frame can be processed yet. If the filter stores internally one or a few frames for some input, these buffered frames must be flushed immediately when a new input produces new output.
DNNReturnType ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, DNNFunctionType func_type, void *log_ctx)
int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane)
Compute the size of an image line with format pix_fmt and width width for the plane plane.
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V).
If a filter buffers one or a few frames for some input, these buffered frames must be flushed immediately if a new input produces new output. The filter must not call request_frame() to get more input; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame() method or to the application. If a filter has several inputs, it must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced.
request_frame: For filters that do not use the activate() callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame() on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. It must return, or at least make progress towards producing a frame.
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
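The pixel formats listed above are the ones the conversion code has to distinguish. The sketch below is a hypothetical helper (not code from this file) showing the kind of frame->format dispatch involved: packed RGB/BGR rows carry three samples per pixel, while for the remaining formats only the first (luma or gray) plane is converted.

    #include <libavutil/frame.h>
    #include <libavutil/pixfmt.h>

    static int float_samples_per_line(const AVFrame *frame)
    {
        switch (frame->format) {
        case AV_PIX_FMT_RGB24:
        case AV_PIX_FMT_BGR24:
            return frame->width * 3;   /* three packed components per pixel */
        case AV_PIX_FMT_GRAY8:
        case AV_PIX_FMT_GRAYF32:
        case AV_PIX_FMT_YUV420P:
        case AV_PIX_FMT_YUV422P:
        case AV_PIX_FMT_YUV444P:
        case AV_PIX_FMT_YUV410P:
        case AV_PIX_FMT_YUV411P:
        case AV_PIX_FMT_NV12:
            return frame->width;       /* first plane only */
        default:
            return -1;                 /* not handled in this sketch */
        }
    }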
static DNNReturnType proc_from_frame_to_dnn_frameprocessing(AVFrame *frame, DNNData *input, void *log_ctx)
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
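Finally, a hypothetical helper (not from this file) showing av_get_pix_fmt_name() in the same "fmt:%s s:%dx%d" logging style used by the error messages above:

    #include <libavutil/frame.h>
    #include <libavutil/log.h>
    #include <libavutil/pixdesc.h>

    static void log_frame_format(void *log_ctx, const AVFrame *frame)
    {
        const char *name = av_get_pix_fmt_name(frame->format);
        av_log(log_ctx, AV_LOG_INFO, "fmt:%s s:%dx%d\n",
               name ? name : "unknown", frame->width, frame->height);
    }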