#include <sys/types.h>

#include <mfx/mfxvideo.h>

#define OFFSET(x) offsetof(QSVVP9EncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

static const AVOption options[] = {
#if QSV_HAVE_EXT_VP9_TILES
    /* VP9 tiled encoding: up to 32 tile columns and 4 tile rows */
    { "tile_cols", "Number of columns for tiled encoding",
      OFFSET(qsv.tile_cols), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 32, VE },
    { "tile_rows", "Number of rows for tiled encoding",
      OFFSET(qsv.tile_rows), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 4,  VE },
#endif
    /* further options elided from this excerpt */
    { NULL },
};

static const FFCodecDefault qsv_enc_defaults[] = {
    /* further defaults elided from this excerpt */
    { "flags",     "+cgop" },
    { NULL },
};

const FFCodec ff_vp9_qsv_encoder = {
    /* other fields elided from this excerpt */
    .p.priv_class   = &class,
    .p.wrapper_name = "qsv",
};
AVPixelFormat
Pixel format.
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
static av_cold int qsv_enc_close(AVCodecContext *avctx)
AVFrame
This structure describes decoded (raw) audio or video data.
static const FFCodecDefault qsv_enc_defaults[]
AVCodec p
The public AVCodec.
#define FF_CODEC_ENCODE_CB(func)
const AVCodecHWConfigInternal *const ff_qsv_enc_hw_configs[]
#define LIBAVUTIL_VERSION_INT
AVClass
Describe the class of an AVClass context structure.
static av_cold int qsv_enc_init(AVCodecContext *avctx)
const char * av_default_item_name(void *ptr)
Return the context name.
AV_PIX_FMT_QSV
HW acceleration through QSV, data[3] contains a pointer to the mfxFrameSurface1 structure.
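The data[3] convention noted above lets hwaccel-aware code reach the underlying Media SDK surface directly. A minimal sketch, not part of this file:

    #include <mfx/mfxvideo.h>
    #include <libavutil/frame.h>
    #include <libavutil/pixfmt.h>

    /* Return the Media SDK surface backing a hardware frame, or NULL if the
     * frame is not an AV_PIX_FMT_QSV frame. */
    static mfxFrameSurface1 *qsv_surface_of(const AVFrame *frame)
    {
        if (!frame || frame->format != AV_PIX_FMT_QSV)
            return NULL;
        return (mfxFrameSurface1 *)frame->data[3];
    }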
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
static int qsv_enc_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet)
int ff_qsv_enc_close(AVCodecContext *avctx, QSVEncContext *q)
const char * name
Name of the codec implementation.
AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V).
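For the system-memory path the encoder consumes NV12 input laid out as described above: data[0] holds the Y plane and data[1] the interleaved UV plane. A minimal allocation sketch, not part of this file:

    #include <libavutil/frame.h>
    #include <libavutil/pixfmt.h>

    /* Allocate a writable NV12 frame; av_frame_get_buffer() fills in
     * data[0] (Y) and data[1] (interleaved UV) plus linesize[]. */
    static AVFrame *alloc_nv12_frame(int width, int height)
    {
        AVFrame *frame = av_frame_alloc();
        if (!frame)
            return NULL;
        frame->format = AV_PIX_FMT_NV12;
        frame->width  = width;
        frame->height = height;
        if (av_frame_get_buffer(frame, 0) < 0)
            av_frame_free(&frame);                  /* sets frame to NULL */
        return frame;
    }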
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is attached.
AVCodecContext
main external API structure.
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
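Because ff_vp9_qsv_encoder advertises AV_CODEC_CAP_DELAY, callers have to drain the encoder at end of stream: send a NULL frame once, then read packets until AVERROR_EOF. A minimal sketch of that flush loop, not part of this file and with error handling trimmed:

    #include <libavcodec/avcodec.h>

    /* Drain a delayed encoder after the last real frame has been sent. */
    static int flush_encoder(AVCodecContext *avctx, AVPacket *pkt)
    {
        int ret = avcodec_send_frame(avctx, NULL);  /* enter draining mode */
        if (ret < 0)
            return ret;
        while ((ret = avcodec_receive_packet(avctx, pkt)) >= 0) {
            /* ... write or consume pkt here ... */
            av_packet_unref(pkt);
        }
        return ret == AVERROR_EOF ? 0 : ret;
    }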
const FFCodec ff_vp9_qsv_encoder
int ff_qsv_enc_init(AVCodecContext *avctx, QSVEncContext *q)
#define AV_CODEC_CAP_HYBRID
Codec is potentially backed by a hardware implementation, but not necessarily.
AVPacket
This structure stores compressed data.
static const AVOption options[]
int ff_qsv_encode(AVCodecContext *avctx, QSVEncContext *q, AVPacket *pkt, const AVFrame *frame, int *got_packet)