#define START_CODE 0x000001
#define IS_IDR(nut) (nut == VVC_IDR_W_RADL || nut == VVC_IDR_N_LP)
#define IS_H266_SLICE(nut) (nut <= VVC_RASL_NUT || (nut >= VVC_IDR_W_RADL && nut <= VVC_GDR_NUT))
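IS_IDR and IS_H266_SLICE classify the 5-bit nal_unit_type carried in the second byte of the two-byte H.266 NAL unit header. A minimal sketch of how such a value can be extracted and tested with the macros above; classify_nal, nal_hdr and the return codes are hypothetical and not part of the parser:

#include <stdint.h>

/* nal_hdr points at the first byte after the start code (hypothetical helper). */
static int classify_nal(const uint8_t *nal_hdr)
{
    int nut = (nal_hdr[1] >> 3) & 0x1F;  /* nal_unit_type: bits 3..7 of byte 1 */

    if (IS_IDR(nut))
        return 2;   /* IDR picture, a random access point */
    if (IS_H266_SLICE(nut))
        return 1;   /* some other VCL (slice) NAL unit */
    return 0;       /* non-VCL: parameter sets, SEI, ... */
}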
switch (sps->sps_bitdepth_minus8) {
for (i = 0; i < buf_size; i++) {
code_len = ((pc->state64 >> 3 * 8) & 0xFFFFFFFF) == 0x01 ? 4 : 3;
nut = (pc->state64 >> (8 + 3)) & 0x1F;
return i - (code_len + 2);
int sh_picture_header_in_slice_header_flag = buf[i] >> 7;
if (nut == VVC_PH_NUT || sh_picture_header_in_slice_header_flag) {
return i - (code_len + 2);
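The frame-end search above relies on a rolling byte history: each input byte is shifted into pc->state64, so a start code, its optional fourth (zero) prefix byte and the following NAL header bytes can be recognised without backtracking. A simplified sketch of the same idea with a 32-bit state that only detects three-byte start codes; scan_start_codes and on_start_code are hypothetical names:

#include <stddef.h>
#include <stdint.h>

static void scan_start_codes(const uint8_t *buf, size_t size,
                             void (*on_start_code)(size_t pos))
{
    uint32_t state = ~0u;                      /* last 4 bytes, MSB first */

    for (size_t i = 0; i < size; i++) {
        state = (state << 8) | buf[i];
        if ((state & 0x00FFFFFF) == 0x000001)  /* 00 00 01 just shifted in */
            on_start_code(i + 1);              /* payload begins at i + 1 */
    }
}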
static const uint8_t h266_sub_width_c[] = {
    1, 2, 2, 1
};
static const uint8_t h266_sub_height_c[] = {
    1, 2, 1, 1
};
s->coded_width  = pps->pps_pic_width_in_luma_samples;
s->coded_height = pps->pps_pic_height_in_luma_samples;
s->width  = pps->pps_pic_width_in_luma_samples -
    (pps->pps_conf_win_left_offset + pps->pps_conf_win_right_offset) *
    h266_sub_width_c[sps->sps_chroma_format_idc];
s->height = pps->pps_pic_height_in_luma_samples -
    (pps->pps_conf_win_top_offset + pps->pps_conf_win_bottom_offset) *
    h266_sub_height_c[sps->sps_chroma_format_idc];
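The coded size comes straight from the PPS, while the display size removes the conformance window, with the offsets scaled by the chroma subsampling factors indexed from h266_sub_width_c/h266_sub_height_c. A worked example with hypothetical 1080p 4:2:0 values:

/* Hypothetical values:
 *   pps_pic_width_in_luma_samples  = 1920
 *   pps_pic_height_in_luma_samples = 1088  (padded up to the CTU grid)
 *   pps_conf_win_bottom_offset     = 4, all other offsets 0
 *   sps_chroma_format_idc          = 1 -> SubWidthC = SubHeightC = 2
 *
 *   coded_width x coded_height = 1920 x 1088
 *   width  = 1920 - (0 + 0) * 2 = 1920
 *   height = 1088 - (0 + 4) * 2 = 1080
 */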
avctx->profile = sps->profile_tier_level.general_profile_idc;
avctx->level   = sps->profile_tier_level.general_level_idc;
if (sps->sps_ptl_dpb_hrd_params_present_flag &&
    sps->sps_timing_hrd_params_present_flag) {
    uint32_t num = sps->sps_general_timing_hrd_parameters.num_units_in_tick;
    uint32_t den = sps->sps_general_timing_hrd_parameters.time_scale;
    if (num != 0 && den != 0)
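When the SPS carries general timing/HRD parameters, num_units_in_tick and time_scale define the stream's clock tick, and av_reduce (listed in the symbol reference below) normalises the resulting ratio. A sketch of deriving a frame-rate guess from those two values; this is not the parser's exact code, and the helper name is hypothetical:

#include <libavutil/rational.h>

/* Hypothetical helper: turn num_units_in_tick / time_scale into a reduced
 * frame-rate fraction, e.g. 1000 / 50000 -> 50/1. */
static void timing_to_framerate(uint32_t num_units_in_tick, uint32_t time_scale,
                                int *fr_num, int *fr_den)
{
    if (num_units_in_tick && time_scale)
        av_reduce(fr_num, fr_den, time_scale, num_units_in_tick, 1 << 30);
}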
int poc_msb, max_poc_lsb, poc_lsb;
max_poc_lsb = 1 << (sps->sps_log2_max_pic_order_cnt_lsb_minus4 + 4);
poc_lsb     = ph->ph_pic_order_cnt_lsb;
if (ph->ph_poc_msb_cycle_present_flag)
    poc_msb = ph->ph_poc_msb_cycle_val * max_poc_lsb;
int prev_poc_lsb = prev_poc & (max_poc_lsb - 1);
int prev_poc_msb = prev_poc - prev_poc_lsb;
if (ph->ph_poc_msb_cycle_present_flag) {
    poc_msb = ph->ph_poc_msb_cycle_val * max_poc_lsb;
if ((poc_lsb < prev_poc_lsb) && ((prev_poc_lsb - poc_lsb) >= (max_poc_lsb / 2)))
    poc_msb = prev_poc_msb + (unsigned)max_poc_lsb;
else if ((poc_lsb > prev_poc_lsb) && ((poc_lsb - prev_poc_lsb) > (max_poc_lsb / 2)))
    poc_msb = prev_poc_msb - (unsigned)max_poc_lsb;
else
    poc_msb = prev_poc_msb;
*poc = poc_msb + poc_lsb;
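This is the usual picture order count derivation: the slice/picture header only carries the LSB part, and the MSB part is inferred from the previous picture's POC, wrapping by max_poc_lsb when the LSB crosses the half-range. A worked example with small hypothetical values:

/* Hypothetical values:
 *   sps_log2_max_pic_order_cnt_lsb_minus4 = 0 -> max_poc_lsb = 16
 *   previous picture: poc = 30 (prev_poc_msb = 16, prev_poc_lsb = 14)
 *   current picture:  ph_pic_order_cnt_lsb = 2, no msb cycle signalled
 *
 *   poc_lsb (2) < prev_poc_lsb (14) and 14 - 2 >= 16 / 2,
 *   so poc_msb = prev_poc_msb + max_poc_lsb = 32
 *   *poc = 32 + 2 = 34
 */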
info->ph = &ph->ph_picture_header;
if (info->slice->header.sh_picture_header_in_slice_header_flag)
    info->ph = &info->slice->header.sh_picture_header;
"can't find picture header in picture unit.\n");
info->pps = h266->pps[info->ph->ph_pic_parameter_set_id];
info->ph->ph_pic_parameter_set_id);
info->sps = h266->sps[info->pps->pps_seq_parameter_set_id];
info->pps->pps_seq_parameter_set_id);
const uint8_t **buf, int *buf_size)
ctx->cbc->log_ctx = avctx;
if (ctx->last_au.size) {
    *buf      = ctx->last_au.data;
    *buf_size = ctx->last_au.size;
const uint8_t **poutbuf, int *poutbuf_size,
const uint8_t *buf, int buf_size)
int is_dummy_buf = !buf_size;
int flush = !buf_size;
const uint8_t *dummy_buf = buf;
ctx->parsed_extradata = 1;
is_dummy_buf &= (dummy_buf == buf);
*poutbuf_size = buf_size;
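For context, the fragments above sit behind the standard libavcodec parser API (see ff_vvc_parser and vvc_parser_parse in the symbol reference below). A minimal usage sketch, assuming an FFmpeg build with the VVC parser; split_access_units, data and size are hypothetical, and error handling is trimmed:

#include <libavcodec/avcodec.h>

static void split_access_units(const uint8_t *data, int size)
{
    const AVCodec *codec         = avcodec_find_decoder(AV_CODEC_ID_VVC);
    AVCodecContext *avctx        = avcodec_alloc_context3(codec);
    AVCodecParserContext *parser = av_parser_init(AV_CODEC_ID_VVC);

    while (size > 0) {
        uint8_t *out = NULL;
        int out_size = 0;
        int used = av_parser_parse2(parser, avctx, &out, &out_size,
                                    data, size,
                                    AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
        data += used;
        size -= used;
        if (out_size) {
            /* out/out_size now describe one complete access unit */
        }
    }
    /* a final call with buf_size == 0 flushes the last buffered access unit */

    av_parser_close(parser);
    avcodec_free_context(&avctx);
}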
static void error(const char *err)
static void au_detector_init(AuDetector *d)
#define START_CODE
start_code_prefix_one_3bytes
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
const H266RawPictureHeader * ph
#define AV_LOG_WARNING
Something somehow does not look correct.
AVPixelFormat
Pixel format.
enum AVColorSpace colorspace
YUV colorspace type.
AVColorTransferCharacteristic
Color Transfer Characteristic.
av_cold void ff_cbs_fragment_free(CodedBitstreamFragment *frag)
Free the units array of a fragment in addition to what ff_cbs_fragment_reset does.
void * content
Pointer to the decomposed form of this unit.
int av_grow_packet(AVPacket *pkt, int grow_by)
Increase packet size, correctly zeroing padding.
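append_au (listed further down as the "Combine PU to AU" helper) relies on the grow-then-copy pattern that av_grow_packet enables. A sketch of that pattern; append_buffer is a hypothetical name and error handling is minimal:

#include <string.h>
#include <libavcodec/packet.h>

static int append_buffer(AVPacket *pkt, const uint8_t *buf, int buf_size)
{
    int old_size = pkt->size;
    int ret = av_grow_packet(pkt, buf_size);   /* also re-zeroes the padding */
    if (ret < 0)
        return ret;
    memcpy(pkt->data + old_size, buf, buf_size);
    return 0;
}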
static int FUNC() ph(CodedBitstreamContext *ctx, RWContext *rw, H266RawPH *current)
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
@ AVCOL_RANGE_JPEG
Full range content.
void ff_cbs_fragment_reset(CodedBitstreamFragment *frag)
Free the units contained in a fragment as well as the fragment's own data buffer, but not the units array itself.
Context structure for coded bitstream operations.
static const CodedBitstreamUnitType decompose_unit_types[]
#define AV_PIX_FMT_YUV420P10
CodedBitstreamUnitType type
Codec-specific type of this unit.
AVColorPrimaries
Chromaticity coordinates of the source primaries.
static int vvc_parser_parse(AVCodecParserContext *s, AVCodecContext *avctx, const uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size)
Coded bitstream unit structure.
av_cold void ff_cbs_close(CodedBitstreamContext **ctx_ptr)
Close a context and free all internal state.
H266RawPPS * pps[VVC_MAX_PPS_COUNT]
RefStruct references.
int ff_cbs_read(CodedBitstreamContext *ctx, CodedBitstreamFragment *frag, const uint8_t *data, size_t size)
Read a bitstream from a memory region into a fragment, then split into units and decompose.
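These ff_cbs_* entries are libavcodec-internal; the parser's picture_unit fragment goes through a read/iterate/free cycle with them. A sketch under the signatures listed on this page; read_picture_unit, log_ctx, data and size are hypothetical, and the includes assume the in-tree build:

#include "libavutil/macros.h"
#include "cbs.h"

static int read_picture_unit(void *log_ctx, const uint8_t *data, size_t size)
{
    CodedBitstreamContext *cbc = NULL;
    CodedBitstreamFragment pu  = { 0 };
    int ret;

    ret = ff_cbs_init(&cbc, AV_CODEC_ID_VVC, log_ctx);
    if (ret < 0)
        return ret;
    cbc->decompose_unit_types    = decompose_unit_types;
    cbc->nb_decompose_unit_types = FF_ARRAY_ELEMS(decompose_unit_types);

    ret = ff_cbs_read(cbc, &pu, data, size);
    if (ret >= 0) {
        for (int i = 0; i < pu.nb_units; i++) {
            /* pu.units[i].type is the NAL unit type,
             * pu.units[i].content the decomposed struct (e.g. an SPS) */
        }
    }

    ff_cbs_fragment_free(&pu);
    ff_cbs_close(&cbc);
    return ret;
}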
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
CodedBitstreamFragment picture_unit
#define AV_PIX_FMT_YUV444P10
CodedBitstreamUnit * units
Pointer to an array of units of length nb_units_allocated.
@ AV_PICTURE_STRUCTURE_FRAME
coded as frame
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define FF_ARRAY_ELEMS(a)
Coded bitstream fragment structure, combining one or more units.
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define AV_PIX_FMT_GRAY10
static int is_au_start(VVCParserContext *s, const PuInfo *pu, void *log_ctx)
enum AVColorRange color_range
MPEG vs JPEG YUV range.
@ AV_PICTURE_TYPE_I
Intra.
const AVCodecParser ff_vvc_parser
void(* flush)(AVBSFContext *ctx)
#define AV_PIX_FMT_YUV422P10
@ AV_PIX_FMT_GRAY8
Y, 8bpp.
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
int level
Encoding level descriptor.
static void get_slice_poc(VVCParserContext *s, int *poc, const H266RawSPS *sps, const H266RawPictureHeader *ph, const H266RawSliceHeader *slice, void *log_ctx)
static int parse_nal_units(AVCodecParserContext *s, const uint8_t *buf, int buf_size, AVCodecContext *avctx)
Parse the NAL units of the found picture and decode some basic information.
static enum AVPixelFormat pix_fmts_10bit[]
static enum AVPixelFormat pix_fmts_8bit[]
static int combine_au(AVCodecParserContext *s, AVCodecContext *avctx, const uint8_t **buf, int *buf_size)
Combine picture units (PUs) into an access unit (AU).
int ff_cbs_read_extradata_from_codec(CodedBitstreamContext *ctx, CodedBitstreamFragment *frag, const AVCodecContext *avctx)
int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size)
Combine the (truncated) bitstream into a complete frame.
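When PARSER_FLAG_COMPLETE_FRAMES is not set, a parser typically pairs its find_frame_end with ff_combine_frame to buffer partial input across calls. A sketch of that standard pattern (internal lavc API; s, buf, buf_size, poutbuf and poutbuf_size are the parse-callback arguments, pc the embedded ParseContext, and END_NOT_FOUND the usual "no end found yet" marker):

int next = find_frame_end(s, buf, buf_size);

if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
    /* frame still incomplete: keep buffering, output nothing this call */
    *poutbuf      = NULL;
    *poutbuf_size = 0;
    return buf_size;
}
/* buf/buf_size now cover exactly one frame, ready to be parsed and returned */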
CodedBitstreamContext * cbc
const H266RawSlice * slice
#define PARSER_FLAG_COMPLETE_FRAMES
#define i(width, name, range_min, range_max)
AVColorSpace
YUV colorspace type.
H266RawSPS * sps[VVC_MAX_SPS_COUNT]
RefStruct references.
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
#define IS_H266_SLICE(nut)
static av_cold int vvc_parser_init(AVCodecParserContext *s)
static int get_pict_type(const CodedBitstreamFragment *pu)
static int append_au(AVPacket *pkt, const uint8_t *buf, int buf_size)
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H266RawSPS *current)
H266RawSliceHeader header
main external API structure.
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
uint32_t CodedBitstreamUnitType
The codec-specific type of a bitstream unit.
static av_cold void vvc_parser_close(AVCodecParserContext *s)
static void set_parser_ctx(AVCodecParserContext *s, AVCodecContext *avctx, const PuInfo *pu)
uint64_t state64
Contains the last 8 bytes, in MSB order.
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
@ AV_PICTURE_TYPE_P
Predicted.
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
av_cold int ff_cbs_init(CodedBitstreamContext **ctx_ptr, enum AVCodecID codec_id, void *log_ctx)
Create and initialise a new context for the given codec.
This structure stores compressed data.
static int get_format(const H266RawSPS *sps)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static int find_frame_end(AVCodecParserContext *s, const uint8_t *buf, int buf_size)
Find the end of the current frame in the bitstream.
void * priv_data
Format private data.
static int get_pu_info(PuInfo *info, const CodedBitstreamH266Context *h266, const CodedBitstreamFragment *pu, void *logctx)
int nb_units
Number of units in this fragment.