Go to the documentation of this file.
45     int is_ref, int in_dpb, int prev)
125     if (current_depth == ctx->max_b_depth || start->next->next == end) {
126         for (pic = start->next; pic; pic = pic->next) {
145     for (pic = start->next; pic != end; pic = pic->next)
147     for (pic = start->next, i = 1; 2 * i < len; pic = pic->next, i++);
164         current_depth + 1, &next);
169         current_depth + 1, last);
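The fragments at listing lines 125-169 appear to come from hw_base_encode_set_b_pictures(): once the maximum B-frame depth is reached (or the run between the two surrounding references is too short to split further) every remaining picture keeps the current depth, otherwise the middle picture of the run is promoted and both halves are handled recursively at depth + 1. A minimal sketch of that midpoint-split idea follows; the BPic type and set_b_depths() helper are illustrative only and do not reproduce the exact FFmpeg arguments.

    /* Sketch only: assign a "b_depth" to each picture strictly between two
     * reference pictures by recursively promoting the middle of the run. */
    typedef struct BPic {
        struct BPic *next;
        int b_depth;
    } BPic;

    static void set_b_depths(BPic *start, BPic *end, int depth, int max_depth)
    {
        BPic *pic;
        int i, len = 0;

        for (pic = start->next; pic != end; pic = pic->next)
            ++len;
        if (len == 0)
            return;

        if (depth == max_depth || len <= 2) {
            /* Leaf case: everything left in the run keeps the current depth. */
            for (pic = start->next; pic != end; pic = pic->next)
                pic->b_depth = depth;
            return;
        }

        /* Walk to the middle picture of the run, promote it, then recurse
         * on the two halves at depth + 1. */
        for (pic = start->next, i = 1; 2 * i < len; pic = pic->next, i++);
        pic->b_depth = depth;
        set_b_depths(start, pic, depth + 1, max_depth);
        set_b_depths(pic, end, depth + 1, max_depth);
    }

Promoting the midpoint roughly halves the reference distance at every level, which is what bounds the resulting depth by the logarithm of the run length.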
182     for (i = 0; i < ctx->nb_next_prev; i++) {
183         --ctx->next_prev[i]->ref_count[0];
186     ctx->next_prev[0] = pic;
188     ctx->nb_next_prev = 1;
194     ctx->next_prev[ctx->nb_next_prev++] = pic;
197     --ctx->next_prev[0]->ref_count[0];
199         ctx->next_prev[i] = ctx->next_prev[i + 1];
200     ctx->next_prev[i] = pic;
210     int i, b_counter, closed_gop_end;
215     for (pic = ctx->pic_start; pic; pic = pic->next) {
237                "encode next.\n", pic->b_depth);
246     closed_gop_end = ctx->closed_gop ||
247                      ctx->idr_counter == ctx->gop_per_idr;
248     for (pic = ctx->pic_start; pic; pic = next) {
258         if (b_counter == ctx->b_per_p)
262         if (ctx->gop_counter + b_counter + closed_gop_end >= ctx->gop_size)
266         if (next && next->force_idr)
272     if (!pic && ctx->end_of_stream) {
283                "need more input for reference pictures.\n");
286     if (ctx->input_order <= ctx->decode_delay && !ctx->end_of_stream) {
288                "need more input for timestamps.\n");
296         ctx->idr_counter = 1;
297         ctx->gop_counter = 1;
299     } else if (ctx->gop_counter + b_counter >= ctx->gop_size) {
300         if (ctx->idr_counter == ctx->gop_per_idr) {
304             ctx->idr_counter = 1;
311         ctx->gop_counter = 1;
314     if (ctx->gop_counter + b_counter + closed_gop_end == ctx->gop_size) {
323         ctx->gop_counter += 1 + b_counter;
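Listing lines 210-323 come from hw_base_encode_pick_next(): b_counter counts the B candidates gathered so far, gop_counter tracks the position inside the current GOP, idr_counter counts GOPs since the last IDR, and closed_gop_end marks whether the GOP must end on a reference. Together they decide whether the next picture starts a new GOP (as IDR or I) or continues the current one. A hedged, simplified sketch of that bookkeeping; the enum, struct and helper names are illustrative:

    /* Sketch only: derive the coding type of the next picture from simple
     * GOP counters. */
    enum PicType { PIC_IDR, PIC_I, PIC_P_OR_B_RUN };

    typedef struct GopState {
        int gop_size;     /* pictures per GOP                     */
        int gop_per_idr;  /* GOPs between IDR pictures            */
        int gop_counter;  /* pictures consumed in the current GOP */
        int idr_counter;  /* GOPs started since the last IDR      */
    } GopState;

    static enum PicType pick_type(GopState *s, int b_counter)
    {
        if (s->gop_counter == 0 ||
            s->gop_counter + b_counter >= s->gop_size) {
            /* Start of a new GOP. */
            if (s->gop_counter == 0 || s->idr_counter == s->gop_per_idr) {
                s->idr_counter = 1;
                s->gop_counter = 1;
                return PIC_IDR;
            }
            ++s->idr_counter;
            s->gop_counter = 1;
            return PIC_I;
        }
        /* Stay inside the current GOP: one reference plus its B run. */
        s->gop_counter += 1 + b_counter;
        return PIC_P_OR_B_RUN;
    }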
334     for (i = 0; i < ctx->nb_next_prev; i++)
364     for (pic = ctx->pic_start; pic; pic = pic->next) {
370     for (pic = ctx->pic_start; pic; pic = pic->next) {
377     for (pic = ctx->pic_start; pic; pic = next) {
384         ctx->pic_start = next;
385         ctx->op->free(avctx, pic);
399          frame->crop_left || frame->crop_right) && !ctx->crop_warned) {
401                "frames ignored due to lack of API support.\n");
402         ctx->crop_warned = 1;
405     if (!ctx->roi_allowed) {
409         if (sd && !ctx->roi_warned) {
411                    "frames ignored due to lack of driver support.\n");
443     if (ctx->recon_frames_ref) {
479     if (ctx->input_order == 0)
480         ctx->first_pts = pic->pts;
481     if (ctx->input_order == ctx->decode_delay)
482         ctx->dts_pts_diff = pic->pts - ctx->first_pts;
483     if (ctx->output_delay > 0)
484         ctx->ts_ring[ctx->input_order %
485                      (3 * ctx->output_delay + ctx->async_depth)] = pic->pts;
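Listing lines 479-485 (with the read-back visible at line 551) show how hw_base_encode_send_frame() records input timestamps: the pts of every submitted frame is stored in a small ring indexed by input_order modulo the ring size, so a later packet can take its dts from a pts submitted several frames earlier. A minimal sketch of that pattern; the surrounding struct and helper names are illustrative, and the index arithmetic is simplified rather than copied from FFmpeg:

    /* Sketch only: remember input pts values in a ring so an output dts can
     * be taken from a frame submitted earlier in input order. */
    #include <stdint.h>

    typedef struct TsRing {
        int64_t *ring;
        int      size;          /* e.g. 3 * output_delay + async_depth    */
        int64_t  input_order;
        int64_t  dts_pts_diff;  /* constant shift so dts <= pts holds     */
    } TsRing;

    static void ts_ring_store(TsRing *t, int64_t pts)
    {
        t->ring[t->input_order++ % t->size] = pts;
    }

    static int64_t ts_ring_dts(const TsRing *t, int64_t order)
    {
        /* "order" is the input order of the frame whose pts should become
         * this packet's dts (typically the encode order minus the delay). */
        return t->ring[order % t->size] - t->dts_pts_diff;
    }

The ring only has to cover the maximum distance between submitting a frame and emitting its packet, which is why its size scales with output_delay and async_depth.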
490     if (ctx->pic_start) {
491         ctx->pic_end->next = pic;
494         ctx->pic_start = pic;
498     err = ctx->op->init(avctx, pic);
502         ctx->end_of_stream = 1;
506         if (ctx->input_order <= ctx->decode_delay)
507             ctx->dts_pts_diff = ctx->pic_end->pts - ctx->first_pts;
513     ctx->op->free(avctx, pic);
542     if (ctx->output_delay == 0) {
551                           (3 * ctx->output_delay + ctx->async_depth)];
565                ctx->op->output && ctx->op->free);
569     if (ctx->tail_pkt->size) {
573     } else if (!tmp->next) {
590     if (!ctx->pic_start) {
591         if (ctx->end_of_stream)
597     if (ctx->async_encode) {
604         err = ctx->op->issue(avctx, pic);
631         err = ctx->op->issue(avctx, pic);
640         err = ctx->op->output(avctx, pic, pkt);
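The receive-packet path (listing lines 565-640) drives the per-picture work through a callback table: the assertion at line 565 requires op->output and op->free, and the fragments at lines 604, 631 and 640 show op->issue and op->output being called on the picture that was picked. A hedged sketch of driving such a vtable; the types and field layout here are made up and are not the actual FFHWBaseEncode structures:

    /* Sketch only: a per-picture callback table of the
     * init -> issue -> output -> free form. */
    typedef struct EncPic EncPic;

    typedef struct EncOps {
        int  (*init)(void *enc, EncPic *pic);              /* per-pic state   */
        int  (*issue)(void *enc, EncPic *pic);             /* submit to HW    */
        int  (*output)(void *enc, EncPic *pic, void *pkt); /* fetch bitstream */
        void (*free)(void *enc, EncPic *pic);              /* release state   */
    } EncOps;

    static int encode_one(void *enc, const EncOps *op, EncPic *pic, void *pkt)
    {
        int err;

        err = op->issue(enc, pic);
        if (err < 0)
            goto fail;
        err = op->output(enc, pic, pkt);
        if (err < 0)
            goto fail;
        return 0;
    fail:
        op->free(enc, pic);
        return err;
    }

Keeping the hardware-specific steps behind callbacks is what lets the same GOP and timestamp logic serve several backends.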
662                                   uint32_t ref_l0, uint32_t ref_l1,
663                                   int flags, int prediction_pre_only)
668     } else if (ref_l0 < 1) {
670                "reference frames.\n");
676                "(supported references: %d / %d).\n",
680                "(supported references: %d / %d).\n", ref_l0, ref_l1);
682         ctx->p_per_i = INT_MAX;
687                "(supported references: %d / %d).\n",
691                "(supported references: %d / %d).\n", ref_l0, ref_l1);
693         ctx->p_per_i = INT_MAX;
699         ctx->max_b_depth = 1;
705         ctx->gop_per_idr = ctx->idr_interval + 1;
708         ctx->gop_per_idr = 1;
734     if (ctx->input_frames->sw_format ==
736         recon_format = ctx->input_frames->sw_format;
747         recon_format = ctx->input_frames->sw_format;
757                "size %dx%d (constraints: width %d-%d height %d-%d).\n",
758                ctx->surface_width, ctx->surface_height,
774     ctx->log_ctx = (void *)avctx;
782                "required to associate the encoding device.\n");
787     if (!ctx->input_frames_ref)
793     if (!ctx->device_ref)
808         next_pic = pic->next;
#define AV_LOG_WARNING
Something somehow does not look correct.
AVPixelFormat
Pixel format.
size_t av_fifo_can_write(const AVFifo *f)
struct FFHWBaseEncodePicture * next
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
#define AVERROR_EOF
End of file.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
This structure describes decoded (raw) audio or video data.
int capabilities
Codec capabilities.
int ff_hw_base_encode_init(AVCodecContext *avctx, FFHWBaseEncodeContext *ctx)
#define AV_LOG_VERBOSE
Detailed information.
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
@ FF_HW_FLAG_B_PICTURE_REFERENCES
static void hw_base_encode_add_ref(FFHWBaseEncodePicture *pic, FFHWBaseEncodePicture *target, int is_ref, int in_dpb, int prev)
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
AVHWFramesConstraints * av_hwdevice_get_hwframe_constraints(AVBufferRef *ref, const void *hwconfig)
Get the constraints on HW frames given a device and the HW-specific configuration to be used with that device.
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
This struct describes the constraints on hardware frames attached to a given device with a hardware-specific configuration.
#define AV_CODEC_FLAG_COPY_OPAQUE
const struct AVCodec * codec
AVBufferRef * opaque_ref
AVBufferRef for free use by the API user.
int ff_hw_base_encode_close(FFHWBaseEncodeContext *ctx)
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
int min_width
The minimum size of frames in this hw_frames_ctx.
int flags
AV_CODEC_FLAG_*.
int ff_hw_base_init_gop_structure(FFHWBaseEncodeContext *ctx, AVCodecContext *avctx, uint32_t ref_l0, uint32_t ref_l1, int flags, int prediction_pre_only)
struct FFHWBaseEncodePicture * prev
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e. state that is not tied to a concrete processing configuration.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
int ff_hw_base_get_recon_format(FFHWBaseEncodeContext *ctx, const void *hwconfig, enum AVPixelFormat *fmt)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int hw_base_encode_clear_old(AVCodecContext *avctx, FFHWBaseEncodeContext *ctx)
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
enum AVPixelFormat * valid_sw_formats
A list of possible values for sw_format in the hw_frames_ctx, terminated by AV_PIX_FMT_NONE.
void av_hwframe_constraints_free(AVHWFramesConstraints **constraints)
Free an AVHWFrameConstraints structure.
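Several of the entries here (av_hwdevice_get_hwframe_constraints, AVHWFramesConstraints, av_hwframe_constraints_free) are used together when validating the encode surface size and the reconstructed-frame format. A hedged usage sketch follows; the check_surface_size() wrapper, the width/height parameters and the NULL log context are assumptions for the example, not part of the documented file:

    #include <libavutil/error.h>
    #include <libavutil/hwcontext.h>
    #include <libavutil/log.h>

    /* Sketch only: query the hardware frame constraints for a device and
     * check a candidate surface size against them. */
    static int check_surface_size(AVBufferRef *device_ref, int width, int height)
    {
        AVHWFramesConstraints *constraints =
            av_hwdevice_get_hwframe_constraints(device_ref, NULL);
        int err = 0;

        if (!constraints)
            return AVERROR(ENOMEM);

        if (width  < constraints->min_width  || width  > constraints->max_width ||
            height < constraints->min_height || height > constraints->max_height) {
            av_log(NULL, AV_LOG_ERROR,
                   "Hardware does not support encoding at "
                   "size %dx%d (constraints: width %d-%d height %d-%d).\n",
                   width, height,
                   constraints->min_width,  constraints->max_width,
                   constraints->min_height, constraints->max_height);
            err = AVERROR(EINVAL);
        }

        av_hwframe_constraints_free(&constraints);
        return err;
    }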
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static void hw_base_encode_set_b_pictures(FFHWBaseEncodeContext *ctx, FFHWBaseEncodePicture *start, FFHWBaseEncodePicture *end, FFHWBaseEncodePicture *prev, int current_depth, FFHWBaseEncodePicture **last)
void * opaque
for some private data of the user
struct FFHWBaseEncodePicture * dpb[MAX_DPB_SIZE]
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
@ AV_PICTURE_TYPE_I
Intra.
size_t av_fifo_can_read(const AVFifo *f)
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
int ff_hw_base_encode_set_output_property(FFHWBaseEncodeContext *ctx, AVCodecContext *avctx, FFHWBaseEncodePicture *pic, AVPacket *pkt, int flag_no_delay)
static int hw_base_encode_check_frame(FFHWBaseEncodeContext *ctx, const AVFrame *frame)
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
int nb_refs[MAX_REFERENCE_LIST_NUM]
@ FF_HW_FLAG_NON_IDR_KEY_PICTURES
int ff_hw_base_encode_receive_packet(FFHWBaseEncodeContext *ctx, AVCodecContext *avctx, AVPacket *pkt)
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
int max_width
The maximum size of frames in this hw_frames_ctx.
int flags
A combination of AV_PKT_FLAG values.
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
struct FFHWBaseEncodePicture * refs[MAX_REFERENCE_LIST_NUM][MAX_PICTURE_REFERENCES]
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames.
This struct describes a set or pool of "hardware" frames (i.e. those with data not located in normal system memory).
static void hw_base_encode_add_next_prev(FFHWBaseEncodeContext *ctx, FFHWBaseEncodePicture *pic)
#define AV_CODEC_FLAG_CLOSED_GOP
static int base_encode_pic_free(FFHWBaseEncodePicture *pic)
main external API structure.
static int hw_base_encode_pick_next(AVCodecContext *avctx, FFHWBaseEncodeContext *ctx, FFHWBaseEncodePicture **pic_out)
static void hw_base_encode_remove_refs(FFHWBaseEncodePicture *pic, int level)
static int hw_base_encode_send_frame(AVCodecContext *avctx, FFHWBaseEncodeContext *ctx, AVFrame *frame)
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 relative to the input.
int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame)
Called by encoders to get the next frame for encoding.
Structure to hold side data for an AVFrame.
This structure stores compressed data.
@ AV_FRAME_DATA_REGIONS_OF_INTEREST
Regions Of Interest, the data is an array of AVRegionOfInterest type, the number of array element is implied by AVFrameSideData.size / AVRegionOfInterest.self_size.
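The listing checks for this side data type on input frames (lines 405-411) and warns once when the driver cannot use it. A hedged sketch of that check using av_frame_get_side_data() as documented above; the check_roi() wrapper and the roi_allowed/roi_warned parameters stand in for the real context fields:

    #include <libavutil/frame.h>
    #include <libavutil/log.h>

    /* Sketch only: warn a single time when ROI side data is present but
     * cannot be honoured. */
    static void check_roi(void *log_ctx, const AVFrame *frame,
                          int roi_allowed, int *roi_warned)
    {
        const AVFrameSideData *sd =
            av_frame_get_side_data(frame, AV_FRAME_DATA_REGIONS_OF_INTEREST);

        if (!roi_allowed && sd && !*roi_warned) {
            av_log(log_ctx, AV_LOG_WARNING, "ROI side data on input "
                   "frames ignored due to lack of driver support.\n");
            *roi_warned = 1;
        }
    }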
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
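The FIFO helpers listed in this index (av_fifo_write, av_fifo_read, av_fifo_can_read, av_fifo_can_write, av_fifo_freep2) operate on element counts rather than bytes. A small hedged example of the round trip; av_fifo_alloc2() and the fifo_roundtrip() wrapper are assumptions for the example and are not part of the documented file:

    #include <libavutil/error.h>
    #include <libavutil/fifo.h>

    /* Sketch only: write one pointer-sized element into a FIFO and read it
     * back again. */
    static int fifo_roundtrip(void)
    {
        AVFifo *f = av_fifo_alloc2(8, sizeof(void *), 0);
        int dummy;
        void *in = &dummy, *out = NULL;

        if (!f)
            return AVERROR(ENOMEM);

        if (av_fifo_can_write(f) >= 1)
            av_fifo_write(f, &in, 1);   /* enqueue one element */

        if (av_fifo_can_read(f) >= 1)
            av_fifo_read(f, &out, 1);   /* dequeue it again    */

        av_fifo_freep2(&f);             /* frees f and sets it to NULL */
        return out == in ? 0 : AVERROR_BUG;
    }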
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
Allocate a new frame attached to the given AVHWFramesContext.
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
#define MAX_PICTURE_REFERENCES