Go to the documentation of this file.
43 frame->nb_rpl_elems = 0;
54 int x_cb = x0 >>
s->ps.sps->log2_ctb_size;
55 int y_cb = y0 >>
s->ps.sps->log2_ctb_size;
56 int pic_width_cb =
s->ps.sps->ctb_width;
57 int ctb_addr_ts =
s->pps->ctb_addr_rs_to_ts[y_cb * pic_width_cb + x_cb];
58 return &
ref->rpl_tab[ctb_addr_ts]->refPicList[0];
93 frame->nb_rpl_elems =
s->pkt.nb_nals;
102 frame->ctb_count =
s->ps.sps->ctb_width *
s->ps.sps->ctb_height;
103 for (j = 0; j <
frame->ctb_count; j++)
134 if (
frame->f &&
frame->sequence ==
s->seq_decode &&
147 s->collocated_ref =
NULL;
149 if (
s->sh.pic_output_flag)
155 ref->sequence =
s->seq_decode;
156 ref->f->crop_left =
s->ps.sps->output_window.left_offset;
157 ref->f->crop_right =
s->ps.sps->output_window.right_offset;
158 ref->f->crop_top =
s->ps.sps->output_window.top_offset;
159 ref->f->crop_bottom =
s->ps.sps->output_window.bottom_offset;
176 if (
IS_IRAP(
s) &&
s->no_rasl_output_flag == 1) {
181 frame->sequence !=
s->seq_decode) {
182 if (
s->sh.no_output_of_prior_pics_flag == 1)
191 int min_poc = INT_MAX;
197 frame->sequence ==
s->seq_output) {
199 if (
frame->poc < min_poc || nb_output == 1) {
200 min_poc =
frame->poc;
207 if (!
flush &&
s->seq_output ==
s->seq_decode &&
s->ps.sps &&
208 nb_output <= s->
ps.sps->temporal_layer[
s->ps.sps->max_sub_layers - 1].num_reorder_pics)
229 "Output frame with POC %d.\n",
frame->poc);
233 if (
s->seq_output !=
s->seq_decode)
245 int min_poc = INT_MAX;
250 if ((
frame->flags) &&
251 frame->sequence ==
s->seq_output &&
257 if (
s->ps.sps && dpb >=
s->ps.sps->temporal_layer[
s->ps.sps->max_sub_layers - 1].max_dec_pic_buffering) {
260 if ((
frame->flags) &&
261 frame->sequence ==
s->seq_output &&
264 min_poc =
frame->poc;
272 frame->sequence ==
s->seq_output &&
273 frame->poc <= min_poc) {
285 int ctb_count =
frame->ctb_count;
286 int ctb_addr_ts =
s->pps->ctb_addr_rs_to_ts[
s->sh.slice_segment_addr];
289 if (
s->slice_idx >=
frame->nb_rpl_elems)
292 for (
i = ctb_addr_ts;
i < ctb_count;
i++)
313 s->rps[
LT_CURR].nb_refs) && !
s->pps->pps_curr_pic_ref_enabled_flag) {
318 for (list_idx = 0; list_idx < nb_list; list_idx++) {
320 RefPicList *rpl = &
s->cur_frame->refPicList[list_idx];
360 rpl->
ref[
i] = rpl_tmp.
ref[idx];
365 memcpy(rpl, &rpl_tmp,
sizeof(*rpl));
370 if (
s->pps->pps_curr_pic_ref_enabled_flag &&
387 int mask = use_msb ? ~0 : (1 <<
s->ps.sps->log2_max_poc_lsb) - 1;
392 if (
ref->f &&
ref->sequence ==
s->seq_decode) {
393 if ((
ref->poc &
mask) == poc && (use_msb ||
ref->poc !=
s->poc))
400 "Could not find ref with POC %d\n", poc);
419 if (!
s->avctx->hwaccel) {
420 if (!
s->ps.sps->pixel_shift) {
422 memset(
frame->f->data[
i], 1 << (
s->ps.sps->bit_depth - 1),
426 for (y = 0; y < (
s->ps.sps->height >>
s->ps.sps->vshift[
i]); y++) {
427 uint8_t *dst =
frame->f->data[
i] + y *
frame->f->linesize[
i];
428 AV_WN16(dst, 1 << (
s->ps.sps->bit_depth - 1));
446 int poc,
int ref_flag, uint8_t use_msb)
485 if (
frame ==
s->cur_frame)
499 if (!(short_rps->
used & (1 <<
i)))
501 else if (i < short_rps->num_negative_pics)
513 int poc = long_rps->
poc[
i];
548 if (
pps->pps_curr_pic_ref_enabled_flag)
const RefPicList * ff_hevc_get_ref_list(const HEVCContext *s, const HEVCFrame *ref, int x0, int y0)
void ff_progress_frame_report(ProgressFrame *f, int n)
Notify later decoding threads when part of their reference frame is ready.
void ff_hevc_flush_dpb(HEVCContext *s)
Drop all frames currently in DPB.
int ff_hevc_frame_rps(HEVCContext *s)
Construct the reference picture sets for the current frame.
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list, all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
uint8_t num_negative_pics
@ AV_FRAME_DATA_FILM_GRAIN_PARAMS
Film grain parameters for a frame, described by AVFilmGrainParams.
This structure describes decoded (raw) audio or video data.
int ff_progress_frame_get_buffer(AVCodecContext *avctx, ProgressFrame *f, int flags)
This function sets up the ProgressFrame, i.e.
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
#define HEVC_FRAME_FLAG_LONG_REF
static void mark_ref(HEVCFrame *frame, int flag)
static HEVCFrame * alloc_frame(HEVCContext *s)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_ARRAY_ELEMS(a)
#define HEVC_SEQUENCE_COUNTER_INVALID
#define HEVC_FRAME_FLAG_BUMPING
static const uint16_t mask[17]
void av_memcpy_backptr(uint8_t *dst, int back, int cnt)
Overlapping memcpy() implementation.
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
#define AV_CEIL_RSHIFT(a, b)
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
void ff_progress_frame_unref(ProgressFrame *f)
Give up a reference to the underlying frame contained in a ProgressFrame and reset the ProgressFrame,...
#define HEVC_FRAME_FLAG_SHORT_REF
int ff_hevc_slice_rpl(HEVCContext *s)
Construct the reference picture list(s) for the current slice.
struct HEVCFrame * ref[HEVC_MAX_REFS]
@ AV_PICTURE_STRUCTURE_BOTTOM_FIELD
coded as bottom field
#define HEVC_SEQUENCE_COUNTER_MASK
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
@ AV_PICTURE_STRUCTURE_TOP_FIELD
coded as top field
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
void(* flush)(AVBSFContext *ctx)
uint8_t poc_msb_present[32]
int ff_hevc_set_new_ref(HEVCContext *s, int poc)
static void * ff_refstruct_allocz(size_t size)
Equivalent to ff_refstruct_alloc_ext(size, 0, NULL, NULL)
static HEVCFrame * generate_missing_ref(HEVCContext *s, int poc)
void ff_hevc_unref_frame(HEVCFrame *frame, int flags)
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
int ff_hevc_output_frame(HEVCContext *s, AVFrame *out, int flush)
Find next frame in output order and put a reference to it in out.
void ff_hevc_bump_frame(HEVCContext *s)
#define HEVC_FRAME_FLAG_OUTPUT
int ff_hevc_frame_nb_refs(const SliceHeader *sh, const HEVCPPS *pps)
Get the number of candidate references for the current frame.
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
static HEVCFrame * find_ref_idx(HEVCContext *s, int poc, uint8_t use_msb)
#define FF_THREAD_FRAME
Decode more than one frame at once.
#define i(width, name, range_min, range_max)
static int init_slice_rpl(HEVCContext *s)
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void ff_hevc_clear_refs(HEVCContext *s)
Mark all frames in DPB as unused for reference.
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
these buffered frames must be flushed immediately if a new input produces new ones; the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter's request_frame method or the application If a filter has several inputs, the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the activate callback, this method is called when a frame is wanted on an output For a source, it should directly call filter_frame on the corresponding output For a filter, if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
static int ref[MAX_W *MAX_W]
static int add_candidate_ref(HEVCContext *s, RefPicList *list, int poc, int ref_flag, uint8_t use_msb)
static void unref_missing_refs(HEVCContext *s)
#define flags(name, subs,...)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
int isLongTerm[HEVC_MAX_REFS]
void ff_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
#define AV_CODEC_EXPORT_DATA_FILM_GRAIN
Decoding only.
void * ff_refstruct_pool_get(FFRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.