#define TL_ADD(t, s) do {                                       \
        av_assert0(l->nb_tabs < TAB_MAX);                       \
        l->tabs[l->nb_tabs].tab  = (void**)&fc->tab.t;          \
        l->tabs[l->nb_tabs].size = sizeof(*fc->tab.t) * (s);    \
        l->nb_tabs++;                                           \
    } while (0)
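TL_ADD only records a pointer/size pair in the TabList; separate passes later walk that list to allocate, zero, or free every registered table (compare tl_create, tl_zero, tl_free in the symbol index below). A minimal sketch of that registration-then-allocate pattern, using hypothetical simplified types rather than the decoder's actual TabList/VVCFrameContext layout:

/* Illustrative sketch only: simplified stand-ins, not the real VVC structures. */
#include <assert.h>
#include <stdlib.h>

#define TAB_MAX 8

typedef struct Tab {
    void **tab;   /* address of the pointer to (re)allocate */
    size_t size;  /* number of bytes the table needs        */
} Tab;

typedef struct TabList {
    Tab tabs[TAB_MAX];
    int nb_tabs;
} TabList;

static void tl_add(TabList *l, void **tab, size_t size)
{
    assert(l->nb_tabs < TAB_MAX);
    l->tabs[l->nb_tabs].tab  = tab;
    l->tabs[l->nb_tabs].size = size;
    l->nb_tabs++;
}

/* Allocate every registered table; 0 on success, -1 on failure. */
static int tl_create(TabList *l)
{
    for (int i = 0; i < l->nb_tabs; i++) {
        *l->tabs[i].tab = calloc(1, l->tabs[i].size);
        if (!*l->tabs[i].tab)
            return -1;
    }
    return 0;
}

static void tl_free(TabList *l)
{
    for (int i = 0; i < l->nb_tabs; i++) {
        free(*l->tabs[i].tab);
        *l->tabs[i].tab = NULL;
    }
}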
    const int ctu_size  = sps ? (1 << sps->ctb_log2_size_y << sps->ctb_log2_size_y) : 0;
    const int ctu_count = pps ? pps->ctb_count : 0;
    const int changed   = fc->tab.sz.ctu_count != ctu_count ||
                          fc->tab.sz.ctu_size  != ctu_size;

    TL_ADD(deblock,   ctu_count);
    TL_ADD(slice_idx, ctu_count);
    const int pic_size_in_min_cb = pps ? pps->min_cb_width * pps->min_cb_height : 0;
    const int changed            = fc->tab.sz.pic_size_in_min_cb != pic_size_in_min_cb;

    TL_ADD(imm, pic_size_in_min_cb);
    TL_ADD(cb_width[i], pic_size_in_min_cb);
    const int pic_size_in_min_cb = pps ? pps->min_cb_width * pps->min_cb_height : 0;
    const int changed            = fc->tab.sz.pic_size_in_min_cb != pic_size_in_min_cb;

    TL_ADD(imtf, pic_size_in_min_cb);
    TL_ADD(ipm,  pic_size_in_min_cb);
    TL_ADD(cqt_depth[i], pic_size_in_min_cb);
    TL_ADD(cb_pos_x[i],  pic_size_in_min_cb);
    TL_ADD(cb_pos_y[i],  pic_size_in_min_cb);
    TL_ADD(cb_height[i], pic_size_in_min_cb);
    TL_ADD(cpm[i],       pic_size_in_min_cb);
    const int pic_size_in_min_pu = pps ? pps->min_pu_width * pps->min_pu_height : 0;
    const int changed            = fc->tab.sz.pic_size_in_min_pu != pic_size_in_min_pu;

    TL_ADD(iaf, pic_size_in_min_pu);
    const int pic_size_in_min_pu = pps ? pps->min_pu_width * pps->min_pu_height : 0;
    const int changed            = fc->tab.sz.pic_size_in_min_pu != pic_size_in_min_pu;

    TL_ADD(msf, pic_size_in_min_pu);
    TL_ADD(mmi, pic_size_in_min_pu);
    TL_ADD(mvf, pic_size_in_min_pu);
    const int pic_size_in_min_tu = pps ? pps->min_tu_width * pps->min_tu_height : 0;
    const int changed            = fc->tab.sz.pic_size_in_min_tu != pic_size_in_min_tu;

    TL_ADD(tu_joint_cbcr_residual_flag, pic_size_in_min_tu);
    TL_ADD(pcmf[i],          pic_size_in_min_tu);
    TL_ADD(tu_coded_flag[i], pic_size_in_min_tu);
    for (int vertical = 0; vertical < 2; vertical++)
        TL_ADD(bs[vertical][i], pic_size_in_min_tu);
    const int pic_size_in_min_tu = pps ? pps->min_tu_width * pps->min_tu_height : 0;
    const int changed            = fc->tab.sz.pic_size_in_min_tu != pic_size_in_min_tu;

    TL_ADD(tb_pos_x0[i], pic_size_in_min_tu);
    TL_ADD(tb_pos_y0[i], pic_size_in_min_tu);
    TL_ADD(tb_width[i],  pic_size_in_min_tu);
    TL_ADD(tb_height[i], pic_size_in_min_tu);
    for (int vertical = 0; vertical < 2; vertical++) {
        TL_ADD(max_len_p[vertical], pic_size_in_min_tu);
        TL_ADD(max_len_q[vertical], pic_size_in_min_tu);
    }
    TL_ADD(qp[i], pic_size_in_min_tu);
    const int ctu_width  = pps ? pps->ctb_width  : 0;
    const int ctu_height = pps ? pps->ctb_height : 0;
    const int chroma_idc = sps ? sps->r->sps_chroma_format_idc : 0;
    const int ps         = sps ? sps->pixel_shift : 0;
    const int changed    = fc->tab.sz.chroma_format_idc != chroma_idc ||
                           fc->tab.sz.ctu_width   != ctu_width  ||
                           fc->tab.sz.ctu_height  != ctu_height ||
                           fc->tab.sz.pixel_shift != ps;

    for (int c_idx = 0; c_idx < c_end; c_idx++) {
        TL_ADD(sao_pixel_buffer_h[c_idx], (w * 2 * ctu_height) << ps);
        TL_ADD(sao_pixel_buffer_v[c_idx], (h * 2 * ctu_width)  << ps);

    for (int c_idx = 0; c_idx < c_end; c_idx++) {
        for (int i = 0; i < 2; i++) {
            TL_ADD(alf_pixel_buffer_h[c_idx][i], (w * border_pixels * ctu_height) << ps);
    const int ctu_height = pps ? pps->ctb_height : 0;
    const int ctu_size   = sps ? sps->ctb_size_y : 0;
    const int ps         = sps ? sps->pixel_shift : 0;
    const int chroma_idc = sps ? sps->r->sps_chroma_format_idc : 0;
    const int has_ibc    = sps ? sps->r->sps_ibc_enabled_flag : 0;
    const int changed    = fc->tab.sz.chroma_format_idc != chroma_idc ||
                           fc->tab.sz.ctu_height  != ctu_height ||
                           fc->tab.sz.ctu_size    != ctu_size   ||
                           fc->tab.sz.pixel_shift != ps;

    const int hs = sps ? sps->hshift[i] : 0;
    const int vs = sps ? sps->vshift[i] : 0;
    TL_ADD(ibc_vir_buf[i], fc->tab.sz.ibc_buffer_width * ctu_size * ctu_height << ps >> hs >> vs);
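The ibc_vir_buf size expression chains shifts: << ps scales samples to bytes, and >> hs >> vs accounts for the chroma subsampling of plane i. A hedged, worked illustration of that arithmetic with purely hypothetical numbers:

/* Hypothetical values, for illustrating the shift arithmetic only. */
int ibc_buffer_width = 256;   /* stand-in for fc->tab.sz.ibc_buffer_width */
int ctu_size         = 128;
int ctu_height       = 2;     /* picture height in CTUs                   */
int ps               = 1;     /* pixel_shift: 2 bytes per sample          */
int hs = 1, vs = 1;           /* chroma plane shifts for 4:2:0            */

long luma_bytes   = (long)ibc_buffer_width * ctu_size * ctu_height << ps;               /* 131072 */
long chroma_bytes = ((long)ibc_buffer_width * ctu_size * ctu_height << ps) >> hs >> vs; /*  32768 */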
    for (int i = 0; i < fc->tab.sz.ctu_count; i++)

    memset(&fc->tab.sz, 0, sizeof(fc->tab.sz));
    const int ctu_count          = pps->ctb_count;
    const int pic_size_in_min_pu = pps->min_pu_width * pps->min_pu_height;

    memset(fc->tab.cus, 0, sizeof(*fc->tab.cus) * ctu_count);
    memset(fc->tab.slice_idx, -1, sizeof(*fc->tab.slice_idx) * ctu_count);

    if (fc->tab.sz.ctu_count != ctu_count) {
        if (!fc->rpl_tab_pool)

    if (fc->tab.sz.pic_size_in_min_pu != pic_size_in_min_pu) {
        if (!fc->tab_dmvr_mvf_pool)

    fc->tab.sz.ctu_count          = pps->ctb_count;
    fc->tab.sz.ctu_size           = 1 << sps->ctb_log2_size_y << sps->ctb_log2_size_y;
    fc->tab.sz.pic_size_in_min_cb = pps->min_cb_width * pps->min_cb_height;
    fc->tab.sz.pic_size_in_min_pu = pic_size_in_min_pu;
    fc->tab.sz.pic_size_in_min_tu = pps->min_tu_width * pps->min_tu_height;
    fc->tab.sz.width              = pps->width;
    fc->tab.sz.height             = pps->height;
    fc->tab.sz.ctu_width          = pps->ctb_width;
    fc->tab.sz.ctu_height         = pps->ctb_height;
    fc->tab.sz.chroma_format_idc  = sps->r->sps_chroma_format_idc;
    fc->tab.sz.pixel_shift        = sps->pixel_shift;
    return diff > 0 && (idx < 0 || diff < min_diff);

    return diff < 0 && (idx < 0 || diff > max_diff);

    const int poc = fc->ref->poc;

    if (find(idx, diff, old_diff)) {
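min_positive and max_negtive (the decoder's spelling) are the two smvd_find_fxn callbacks: the surrounding loop passes them the POC difference of each short-term reference and keeps the closest strictly positive (past) or strictly negative (future) candidate. A compilable sketch of the same selection pattern with hypothetical data; the driver loop and names here are illustrative, not the decoder's own:

/* Sketch only: diff is current POC minus candidate reference POC. */
#include <stdio.h>

typedef int (*find_fn)(int idx, int diff, int best_diff);

static int min_positive(int idx, int diff, int min_diff)
{
    return diff > 0 && (idx < 0 || diff < min_diff);
}

static int max_negative(int idx, int diff, int max_diff)
{
    return diff < 0 && (idx < 0 || diff > max_diff);
}

static int find_ref(const int *poc_diffs, int n, find_fn find)
{
    int idx = -1, best = -1;
    for (int i = 0; i < n; i++) {
        if (find(idx, poc_diffs[i], best)) {
            idx  = i;
            best = poc_diffs[i];
        }
    }
    return idx;  /* -1 if no candidate matched */
}

int main(void)
{
    const int diffs[] = { 4, -2, 1, -6 };  /* hypothetical POC differences */
    printf("forward ref:  %d\n", find_ref(diffs, 4, min_positive)); /* index 2 (diff  1) */
    printf("backward ref: %d\n", find_ref(diffs, 4, max_negative)); /* index 1 (diff -2) */
    return 0;
}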
    for (int i = 0; i < fc->nb_slices_allocated; i++) {

    fc->nb_slices_allocated = 0;

    const int size = (fc->nb_slices_allocated + 1) * 3 / 2;

    if (fc->nb_slices < fc->nb_slices_allocated)

    for (int i = fc->nb_slices_allocated; i < size; i++) {
        if (!fc->slices[i]) {
            fc->nb_slices_allocated = i;

        fc->slices[i]->slice_idx = i;

    fc->nb_slices_allocated = size;
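The reallocation above grows the slice array geometrically, size = (allocated + 1) * 3 / 2, then allocates the new entries one by one and rolls nb_slices_allocated back to the last successful index on failure. A hedged sketch of that pattern with a hypothetical element type, not the decoder's actual SliceContext handling:

/* Sketch only: grow-by-~1.5x array of individually allocated elements. */
#include <stdlib.h>

typedef struct Item { int idx; } Item;

static int items_realloc(Item ***items, int *nb_allocated, int nb_used)
{
    if (nb_used < *nb_allocated)
        return 0;                                  /* still room, nothing to do */

    const int size = (*nb_allocated + 1) * 3 / 2;  /* grow by roughly 1.5x */
    Item **tmp = realloc(*items, size * sizeof(*tmp));
    if (!tmp)
        return -1;
    *items = tmp;

    for (int i = *nb_allocated; i < size; i++) {
        (*items)[i] = calloc(1, sizeof(**items));
        if (!(*items)[i]) {
            *nb_allocated = i;                     /* keep what was allocated */
            return -1;
        }
        (*items)[i]->idx = i;
    }
    *nb_allocated = size;
    return 0;
}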
    if (index < rsh->num_entry_points) {

    if (sc->nb_eps != nb_eps) {

    for (int j = ep->ctu_start; j < ep->ctu_end; j++) {
    const int size = s->nb_fcs;

    dst->nb_rpl_elems = src->nb_rpl_elems;
    dst->ctb_count    = src->ctb_count;
    dst->scaling_win  = src->scaling_win;
    dst->ref_width    = src->ref_width;
    dst->ref_height   = src->ref_height;
    dst->sequence     = src->sequence;
    if (!fc->output_frame)

    if (!fc->DPB[j].frame)

    if (s->nb_frames && s->nb_fcs > 1) {

    s->seq_decode = (s->seq_decode + 1) & 0xff;

    s->poc_tid0 = ph->poc;

    if (is_first_slice) {
    } else if (fc->ref) {
            "Error constructing the reference lists for the current slice.\n");

    c->pix_fmt      = sps->pix_fmt;
    c->coded_width  = pps->width;
    c->coded_height = pps->height;
    c->width        = pps->width  - ((pps->r->pps_conf_win_left_offset + pps->r->pps_conf_win_right_offset) << sps->hshift[CHROMA]);
    c->height       = pps->height - ((pps->r->pps_conf_win_top_offset  + pps->r->pps_conf_win_bottom_offset) << sps->vshift[CHROMA]);
    c->has_b_frames = sps->r->sps_dpb_params.dpb_max_num_reorder_pics[sps->r->sps_max_sublayers_minus1];
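The output width/height above subtract the PPS conformance-window offsets, which are expressed in chroma units and therefore scaled by the chroma shifts. A worked example with hypothetical numbers (1920x1088 coded size, 4:2:0 so both chroma shifts are 1, a bottom offset of 4):

/* Hypothetical values, illustration only. */
int coded_width = 1920, coded_height = 1088;
int win_left = 0, win_right = 0, win_top = 0, win_bottom = 4;  /* conformance-window offsets, chroma units */
int hshift_chroma = 1, vshift_chroma = 1;                      /* 4:2:0 */

int width  = coded_width  - ((win_left + win_right)  << hshift_chroma);  /* 1920 */
int height = coded_height - ((win_top  + win_bottom) << vshift_chroma);  /* 1080 */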
    const int is_first_slice = !fc->nb_slices;

    sc = fc->slices[fc->nb_slices];

    s->vcl_unit_type = nal->type;
    if (is_first_slice) {

        "Decoding of multilayer bitstreams");

    switch (unit->type) {

    s->last_eos = s->eos;

    for (int i = 0; i < frame->nb_units; i++) {

        "Error parsing NAL unit #%d.\n", i);

    if (s->nb_delayed >= s->nb_fcs) {

    while (s->nb_delayed) {

    fc->decode_order = s->nb_frames;

    while (s->nb_delayed)

    s->ps.sps_id_used = 0;

    for (int i = 0; i < s->nb_fcs; i++)

#define VVC_MAX_DELAYED_FRAMES 16

    for (int i = 0; i < s->nb_fcs; i++) {

    if (thread_count == 1)
#define AV_LOG_WARNING
Something somehow does not look correct.
void * content_ref
If content is reference counted, a RefStruct reference backing content.
uint32_t num_ctus_in_curr_slice
NumCtusInCurrSlice.
struct AVFrame * output_frame
#define VVC_MAX_DELAYED_FRAMES
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
static int get_bits_left(GetBitContext *gb)
static atomic_int cpu_count
static void min_cb_tl_init(TabList *l, VVCFrameContext *fc)
int ff_vvc_decode_frame_ps(VVCFrameParamSets *fps, struct VVCContext *s)
static void smvd_ref_idx(const VVCFrameContext *fc, SliceContext *sc)
uint32_t entry_point_start_ctu[VVC_MAX_ENTRY_POINTS]
entry point start in ctu_addr
FFRefStructPool * ff_refstruct_pool_alloc(size_t size, unsigned flags)
Equivalent to ff_refstruct_pool_alloc(size, flags, NULL, NULL, NULL, NULL, NULL)
static int tl_create(TabList *l)
static int slices_realloc(VVCFrameContext *fc)
av_cold void ff_cbs_fragment_free(CodedBitstreamFragment *frag)
Free the units array of a fragment in addition to what ff_cbs_fragment_reset does.
void ff_vvc_report_frame_finished(VVCFrame *frame)
VVCFrame DPB[VVC_MAX_DPB_SIZE+1]
static int get_bits_count(const GetBitContext *s)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static int FUNC() ph(CodedBitstreamContext *ctx, RWContext *rw, H266RawPH *current)
This structure describes decoded (raw) audio or video data.
void ff_cbs_fragment_reset(CodedBitstreamFragment *frag)
Free the units contained in a fragment as well as the fragment's own data buffer, but not the units a...
FFExecutor * ff_vvc_executor_alloc(VVCContext *s, const int thread_count)
static void free_cus(VVCFrameContext *fc)
void(* tl_init_fn)(TabList *l, VVCFrameContext *fc)
const H266RawSliceHeader * r
RefStruct reference.
static void msm_tl_init(TabList *l, VVCFrameContext *fc)
CodedBitstreamUnitType type
Codec-specific type of this unit.
#define fc(width, name, range_min, range_max)
static void ctu_nz_tl_init(TabList *l, VVCFrameContext *fc)
int ff_vvc_slice_rpl(VVCContext *s, VVCFrameContext *fc, SliceContext *sc)
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
int temporal_id
HEVC only, nuh_temporal_id_plus_1 - 1.
static void min_pu_tl_init(TabList *l, VVCFrameContext *fc)
Coded bitstream unit structure.
av_cold void ff_cbs_close(CodedBitstreamContext **ctx_ptr)
Close a context and free all internal state.
static void skip_bits(GetBitContext *s, int n)
static void pixel_buffer_nz_tl_init(TabList *l, VVCFrameContext *fc)
AVCodec p
The public AVCodec.
static int decode_nal_units(VVCContext *s, VVCFrameContext *fc, AVPacket *avpkt)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
static void ff_refstruct_pool_uninit(FFRefStructPool **poolp)
Mark the pool as being available for freeing.
int ff_vvc_decode_sh(VVCSH *sh, const VVCFrameParamSets *fps, const CodedBitstreamUnit *unit)
int flags
AV_CODEC_FLAG_*.
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
static int tl_zero(TabList *l)
void ff_vvc_executor_free(FFExecutor **e)
void ff_vvc_frame_ps_free(VVCFrameParamSets *fps)
int ff_vvc_frame_submit(VVCContext *s, VVCFrameContext *fc)
#define GDR_SET_RECOVERED(s)
static int frame_context_setup(VVCFrameContext *fc, VVCContext *s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static int vvc_decode_frame(AVCodecContext *avctx, AVFrame *output, int *got_output, AVPacket *avpkt)
static int ff_thread_once(char *control, void(*routine)(void))
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_ARRAY_ELEMS(a)
static av_cold int vvc_decode_init(AVCodecContext *avctx)
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
VVCRefPic refs[VVC_MAX_REF_ENTRIES]
int(* smvd_find_fxn)(const int idx, const int diff, const int old_diff)
void ff_vvc_unref_frame(VVCFrameContext *fc, VVCFrame *frame, int flags)
int ff_vvc_frame_wait(VVCContext *s, VVCFrameContext *fc)
Coded bitstream fragment structure, combining one or more units.
#define FF_CODEC_DECODE_CB(func)
const FFCodec ff_vvc_decoder
#define AV_CEIL_RSHIFT(a, b)
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
static int frame_context_for_each_tl(VVCFrameContext *fc, int(*unary_fn)(TabList *l))
#define AV_CODEC_CAP_OTHER_THREADS
Codec supports multithreading through a method other than slice- or frame-level multithreading.
static int pic_arrays_init(VVCContext *s, VVCFrameContext *fc)
static av_cold int vvc_decode_free(AVCodecContext *avctx)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static void pic_arrays_free(VVCFrameContext *fc)
static int slice_init_entry_points(SliceContext *sc, VVCFrameContext *fc, const H2645NAL *nal, const CodedBitstreamUnit *unit)
void ff_vvc_clear_refs(VVCFrameContext *fc)
uint8_t ff_vvc_default_scale_m[64 *64]
static void min_tu_tl_init(TabList *l, VVCFrameContext *fc)
static VVCFrameContext * get_frame_context(const VVCContext *s, const VVCFrameContext *fc, const int delta)
static av_cold void frame_context_free(VVCFrameContext *fc)
int ff_vvc_frame_thread_init(VVCFrameContext *fc)
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
int8_t ref_idx_sym[2]
RefIdxSymL0, RefIdxSymL1.
static int decode_nal_unit(VVCContext *s, VVCFrameContext *fc, const H2645NAL *nal, const CodedBitstreamUnit *unit)
static int frame_setup(VVCFrameContext *fc, VVCContext *s)
CodedBitstreamH2645Context common
static void export_frame_params(VVCContext *s, const VVCFrameContext *fc)
static int decode_slice(VVCContext *s, VVCFrameContext *fc, const H2645NAL *nal, const CodedBitstreamUnit *unit)
void ff_vvc_flush_dpb(VVCFrameContext *fc)
int(* init)(AVBSFContext *ctx)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
static void min_cb_nz_tl_init(TabList *l, VVCFrameContext *fc)
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
static int ep_init_cabac_decoder(SliceContext *sc, const int index, const H2645NAL *nal, GetBitContext *gb, const CodedBitstreamUnit *unit)
static av_cold void vvc_decode_flush(AVCodecContext *avctx)
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
void ff_vvc_frame_thread_free(VVCFrameContext *fc)
static int max_negtive(const int idx, const int diff, const int max_diff)
static int ref_frame(VVCFrame *dst, const VVCFrame *src)
static int get_decoded_frame(VVCContext *s, AVFrame *output, int *got_output)
static void min_tu_nz_tl_init(TabList *l, VVCFrameContext *fc)
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
void * ref
RefStruct reference, backing slice data.
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
void ff_vvc_dsp_init(VVCDSPContext *vvcdsp, int bit_depth)
#define MAX_CONTROL_POINTS
const uint32_t * ctb_addr_in_curr_slice
CtbAddrInCurrSlice.
static int frame_start(VVCContext *s, VVCFrameContext *fc, SliceContext *sc)
static int zero(InterplayACMContext *s, unsigned ind, unsigned col)
int ff_cbs_read_extradata_from_codec(CodedBitstreamContext *ctx, CodedBitstreamFragment *frag, const AVCodecContext *avctx)
int ff_vvc_output_frame(VVCContext *s, VVCFrameContext *fc, AVFrame *out, const int no_output_of_prior_pics_flag, int flush)
int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
#define FF_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME
If this flag is set, the entries will be zeroed before being returned to the user (after the init or ...
#define i(width, name, range_min, range_max)
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
int ff_vvc_decode_aps(VVCParamSets *ps, const CodedBitstreamUnit *unit)
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
const char * name
Name of the codec implementation.
static int set_output_format(const VVCContext *s, const AVFrame *output)
void * av_calloc(size_t nmemb, size_t size)
static int slice_start(SliceContext *sc, VVCContext *s, VVCFrameContext *fc, const CodedBitstreamUnit *unit, const int is_first_slice)
void ff_vvc_ps_uninit(VVCParamSets *ps)
static void ibc_tl_init(TabList *l, VVCFrameContext *fc)
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
void ff_refstruct_replace(void *dstp, const void *src)
Ensure *dstp refers to the same object as src.
static void ispmf_tl_init(TabList *l, VVCFrameContext *fc)
static void slices_free(VVCFrameContext *fc)
main external API structure.
static void eps_free(SliceContext *slice)
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
int ff_vvc_set_new_ref(VVCContext *s, VVCFrameContext *fc, AVFrame **frame)
int ff_cbs_read_packet(CodedBitstreamContext *ctx, CodedBitstreamFragment *frag, const AVPacket *pkt)
Read the data bitstream from a packet into a fragment, then split into units and decompose.
static int wait_delayed_frame(VVCContext *s, AVFrame *output, int *got_output)
static av_cold int frame_context_init(VVCFrameContext *fc, AVCodecContext *avctx)
#define ALF_BORDER_CHROMA
int ff_vvc_frame_rpl(VVCContext *s, VVCFrameContext *fc, SliceContext *sc)
const AVProfile ff_vvc_profiles[]
static av_cold void init_default_scale_m(void)
#define FF_CODEC_CAP_AUTO_THREADS
Codec handles avctx->thread_count == 0 (auto) internally.
av_cold int ff_cbs_init(CodedBitstreamContext **ctx_ptr, enum AVCodecID codec_id, void *log_ctx)
Create and initialise a new context for the given codec.
static void min_pu_nz_tl_init(TabList *l, VVCFrameContext *fc)
This structure stores compressed data.
static int submit_frame(VVCContext *s, VVCFrameContext *fc, AVFrame *output, int *got_output)
static int tl_free(TabList *l)
static void tl_init(TabList *l, const int zero, const int realloc)
int ff_vvc_per_frame_init(VVCFrameContext *fc)
static int min_positive(const int idx, const int diff, const int min_diff)
void ff_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
static void BS_FUNC() skip(BSCTX *bc, unsigned int n)
Skip n bits in the buffer.
static int8_t smvd_find(const VVCFrameContext *fc, const SliceContext *sc, int lx, smvd_find_fxn find)
void ff_vvc_bump_frame(VVCContext *s, VVCFrameContext *fc)
void ff_vvc_ctu_free_cus(CodingUnit **cus)