#include <dav1d/dav1d.h>

#define FF_DAV1D_VERSION_AT_LEAST(x,y) \
    (DAV1D_API_VERSION_MAJOR > (x) || DAV1D_API_VERSION_MAJOR == (x) && DAV1D_API_VERSION_MINOR >= (y))
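As a side note, a minimal sketch of how this guard behaves at compile time: the DAV1D_API_VERSION_* macros come from dav1d's own headers, so nothing beyond <dav1d/dav1d.h> is needed (the macro is repeated here only so the snippet stands alone).

#include <dav1d/dav1d.h>

#define FF_DAV1D_VERSION_AT_LEAST(x,y) \
    (DAV1D_API_VERSION_MAJOR > (x) || DAV1D_API_VERSION_MAJOR == (x) && DAV1D_API_VERSION_MINOR >= (y))

#if FF_DAV1D_VERSION_AT_LEAST(6,0)
/* dav1d 1.0-style API: a single n_threads plus max_frame_delay knob */
#else
/* older API: separate n_frame_threads / n_tile_threads knobs */
#endif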
/* libdav1d_picture_allocator(): the planes were laid out into a pooled,
 * DAV1D_PICTURE_ALIGNMENT-aligned buffer (av_image_fill_arrays(), elided);
 * hand them to dav1d and stash the AVBufferRef for the release callback. */
    /* ... */                  DAV1D_PICTURE_ALIGNMENT);

    p->data[0]   = data[0];
    p->data[1]   = data[1];
    p->data[2]   = data[2];
    p->stride[0] = linesize[0];
    p->stride[1] = linesize[1];
    p->allocator_data = buf;
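For comparison, a self-contained sketch of the Dav1dPicAllocator contract the code above implements. It uses plain malloc instead of FFmpeg's AVBufferPool, every function name is made up, and the alignment/padding follows the rules documented in dav1d/picture.h (128-pixel padded dimensions, DAV1D_PICTURE_ALIGNMENT-aligned plane pointers, allocator_data round-tripped to the release callback).

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <dav1d/dav1d.h>

static int example_alloc_picture(Dav1dPicture *p, void *cookie)
{
    const int align    = DAV1D_PICTURE_ALIGNMENT;
    const int bytes    = p->p.bpc > 8 ? 2 : 1;        /* bytes per sample  */
    const int w        = (p->p.w + 127) & ~127;       /* pad to 128 pixels */
    const int h        = (p->p.h + 127) & ~127;
    const int chroma   = p->p.layout != DAV1D_PIXEL_LAYOUT_I400;
    const int ss_hor   = p->p.layout != DAV1D_PIXEL_LAYOUT_I444;
    const int ss_ver   = p->p.layout == DAV1D_PIXEL_LAYOUT_I420;
    const ptrdiff_t y_stride  = (ptrdiff_t)w * bytes;
    const ptrdiff_t uv_stride = chroma ? (ptrdiff_t)(w >> ss_hor) * bytes : 0;
    const size_t y_size  = (size_t)y_stride * h;
    const size_t uv_size = (size_t)uv_stride * (h >> ss_ver);
    uint8_t *base, *aligned;

    (void)cookie;
    /* one block for all three planes, over-allocated so the start can be
     * aligned and the end padded by DAV1D_PICTURE_ALIGNMENT */
    base = malloc(y_size + 2 * uv_size + 3 * (size_t)align);
    if (!base)
        return DAV1D_ERR(ENOMEM);
    aligned = (uint8_t *)(((uintptr_t)base + align - 1) & ~(uintptr_t)(align - 1));

    p->data[0]   = aligned;
    p->data[1]   = chroma ? aligned + y_size           : NULL;
    p->data[2]   = chroma ? aligned + y_size + uv_size : NULL;
    p->stride[0] = y_stride;
    p->stride[1] = uv_stride;
    p->allocator_data = base;       /* comes back in the release callback */
    return 0;
}

static void example_release_picture(Dav1dPicture *p, void *cookie)
{
    (void)cookie;
    free(p->allocator_data);
}

static void example_install_allocator(Dav1dSettings *s)
{
    s->allocator.cookie                   = NULL;   /* the wrapper above passes its context here */
    s->allocator.alloc_picture_callback   = example_alloc_picture;
    s->allocator.release_picture_callback = example_release_picture;
}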
/* libdav1d_init(): pick the thread count, fill Dav1dSettings, open the decoder */
#if FF_DAV1D_VERSION_AT_LEAST(6,0)
    int threads = c->thread_count;
#else
    int threads = (c->thread_count ? c->thread_count : av_cpu_count()) * 3 / 2;
#endif
    dav1d_default_settings(&s);
    /* ... logger and allocator callbacks (elided) ... */
    s.allocator.cookie = dav1d;
    /* ... */
    s.frame_size_limit = c->max_pixels;
    /* ... apply_grain / operating point options (elided) ... */

#if FF_DAV1D_VERSION_AT_LEAST(6,0)
    /* ... */
    s.n_threads = FFMIN(threads, DAV1D_MAX_THREADS);
    av_log(c, AV_LOG_DEBUG, "Using %d threads, %d max_frame_delay\n",
           s.n_threads, s.max_frame_delay);
#else
    /* ... split "threads" into frame and tile threads (elided) ... */
    av_log(c, AV_LOG_DEBUG, "Using %d frame threads, %d tile threads\n",
           s.n_frame_threads, s.n_tile_threads);
#endif

    res = dav1d_open(&dav1d->c, &s);
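The same default-settings/open/close handshake, reduced to a stand-alone program against the bare dav1d API (no FFmpeg types); the thread fields are left at their defaults because their names differ across dav1d versions.

#include <stdio.h>
#include <dav1d/dav1d.h>

int main(void)
{
    Dav1dSettings s;
    Dav1dContext *ctx = NULL;

    dav1d_default_settings(&s);   /* library defaults, then override selectively */
    s.frame_size_limit = 0;       /* 0 = no frame size limit */

    if (dav1d_open(&ctx, &s) < 0) {
        fprintf(stderr, "dav1d_open() failed\n");
        return 1;
    }
    printf("opened libdav1d %s\n", dav1d_version());
    dav1d_close(&ctx);
    return 0;
}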
/* libdav1d_flush(): drop any buffered input and reset the decoder */
    dav1d_data_unref(&dav1d->data);
    dav1d_flush(dav1d->c);
/* libdav1d_receive_frame(): wrap the next packet, feed it to dav1d and try to
 * fetch a decoded picture ("data" is &dav1d->data). */
    Dav1dPicture pic = { 0 }, *p = &pic;
    /* ... fetch the next packet and wrap it into dav1d->data (elided);
     * allocate "reordered_opaque" so c->reordered_opaque can ride along ... */

    if (!reordered_opaque) {
        dav1d_data_unref(data);
        return AVERROR(ENOMEM);
    }
    memcpy(reordered_opaque, &c->reordered_opaque, sizeof(c->reordered_opaque));

    res = dav1d_data_wrap_user_data(data, reordered_opaque,
                                    libdav1d_user_data_free, reordered_opaque);
    if (res < 0) {
        /* ... */
        dav1d_data_unref(data);
        return res;
    }

    res = dav1d_send_data(dav1d->c, data);
    if (res < 0) {
        if (res == AVERROR(EINVAL))
            res = AVERROR_INVALIDDATA;
        if (res != AVERROR(EAGAIN)) {
            dav1d_data_unref(data);
            return res;
        }
    }

    res = dav1d_get_picture(dav1d->c, p);
    if (res < 0) {
        if (res == AVERROR(EINVAL))
            res = AVERROR_INVALIDDATA;
        else if (res == AVERROR(EAGAIN) && c->internal->draining)
            res = AVERROR_EOF;
        return res;
    }
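For orientation, a hedged sketch of the raw dav1d send/drain loop that the wrapper above maps onto AVERROR codes; everything outside the dav1d API (the function name, the caller's buffers) is made up.

#include <errno.h>
#include <dav1d/dav1d.h>

/* Feed one chunk of compressed data, then drain whatever pictures are ready.
 * "ctx" is an open Dav1dContext and "in" a filled Dav1dData. If
 * dav1d_send_data() returns DAV1D_ERR(EAGAIN), the data was not (fully)
 * consumed and should be offered again after draining. */
static int example_decode_step(Dav1dContext *ctx, Dav1dData *in)
{
    int res = dav1d_send_data(ctx, in);
    if (res < 0 && res != DAV1D_ERR(EAGAIN))
        return res;                         /* hard error */

    for (;;) {
        Dav1dPicture pic = { 0 };
        res = dav1d_get_picture(ctx, &pic);
        if (res == DAV1D_ERR(EAGAIN))
            return 0;                       /* decoder wants more input */
        if (res < 0)
            return res;
        /* ... consume pic.data[] / pic.stride[] here ... */
        dav1d_picture_unref(&pic);
    }
}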
    /* frame->buf[0] was created from p->allocator_data via av_buffer_ref() (elided) */
    if (!frame->buf[0]) {
        dav1d_picture_unref(p);
        return AVERROR(ENOMEM);
    }
    frame->data[0]     = p->data[0];
    frame->data[1]     = p->data[1];
    frame->data[2]     = p->data[2];
    frame->linesize[0] = p->stride[0];
    frame->linesize[1] = p->stride[1];
    frame->linesize[2] = p->stride[1];

    c->profile = p->seq_hdr->profile;
    c->level   = ((p->seq_hdr->operating_points[0].major_level - 2) << 2)
               | p->seq_hdr->operating_points[0].minor_level;
    frame->width  = p->p.w;
    frame->height = p->p.h;
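A quick worked check of the level mapping above, assuming AV1 "level 4.0" is signalled as major_level 4 and minor_level 0 and should therefore come out as seq_level_idx 8:

#include <assert.h>

int main(void)
{
    int major_level = 4, minor_level = 0;                /* AV1 level 4.0        */
    int level = ((major_level - 2) << 2) | minor_level;  /* same formula as above */
    assert(level == 8);                                  /* seq_level_idx 8       */
    return 0;
}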
    if (c->width != p->p.w || c->height != p->p.h) {
        res = ff_set_dimensions(c, p->p.w, p->p.h);
        /* ... */
    }

    av_reduce(&frame->sample_aspect_ratio.num,
              &frame->sample_aspect_ratio.den,
              frame->height * (int64_t)p->frame_hdr->render_width,
              frame->width  * (int64_t)p->frame_hdr->render_height,
              INT_MAX);
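The same av_reduce() reduction with made-up numbers, to show what ends up in sample_aspect_ratio: a 1280x720 coded frame that the bitstream asks to render at 1920x720 reduces to a 3:2 sample aspect ratio.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    AVRational sar;
    av_reduce(&sar.num, &sar.den,
              720  * (int64_t)1920,   /* height * render_width  */
              1280 * (int64_t)720,    /* width  * render_height */
              INT_MAX);
    printf("SAR %d:%d\n", sar.num, sar.den);   /* prints "SAR 3:2" */
    return 0;
}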
    switch (p->seq_hdr->chr) {
    case DAV1D_CHR_VERTICAL:
        frame->chroma_location = c->chroma_sample_location = AVCHROMA_LOC_LEFT;
        break;
    case DAV1D_CHR_COLOCATED:
        frame->chroma_location = c->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
        break;
    }
    /* ... color range, primaries, transfer and matrix (elided) ... */

    if (p->p.layout == DAV1D_PIXEL_LAYOUT_I444 &&
        p->seq_hdr->mtrx == DAV1D_MC_IDENTITY &&
        p->seq_hdr->pri  == DAV1D_COLOR_PRI_BT709 &&
        p->seq_hdr->trc  == DAV1D_TRC_SRGB)
        frame->format = c->pix_fmt = pix_fmt_rgb[p->seq_hdr->hbd];
    else
        frame->format = c->pix_fmt = pix_fmt[p->p.layout][p->seq_hdr->hbd];
    if (p->m.user_data.data)
        memcpy(&frame->reordered_opaque, p->m.user_data.data,
               sizeof(frame->reordered_opaque));
    else
        frame->reordered_opaque = AV_NOPTS_VALUE;
    if (p->seq_hdr->num_units_in_tick && p->seq_hdr->time_scale) {
        av_reduce(&c->framerate.den, &c->framerate.num,
                  p->seq_hdr->num_units_in_tick, p->seq_hdr->time_scale, INT_MAX);
        if (p->seq_hdr->equal_picture_interval)
            c->ticks_per_frame = p->seq_hdr->num_ticks_per_picture;
    }
    frame->pts = p->m.timestamp;
    frame->pkt_pts      = p->m.timestamp;   /* deprecated duplicate of pts,
                                             * wrapped in FF_DISABLE/ENABLE_DEPRECATION_WARNINGS */
    frame->pkt_dts      = p->m.timestamp;
    frame->pkt_pos      = p->m.offset;
    frame->pkt_size     = p->m.size;
    frame->pkt_duration = p->m.duration;
    frame->key_frame    = p->frame_hdr->frame_type == DAV1D_FRAME_TYPE_KEY;

    switch (p->frame_hdr->frame_type) {
    case DAV1D_FRAME_TYPE_KEY:
    case DAV1D_FRAME_TYPE_INTRA:
        frame->pict_type = AV_PICTURE_TYPE_I;
        break;
    case DAV1D_FRAME_TYPE_INTER:
        frame->pict_type = AV_PICTURE_TYPE_P;
        break;
    case DAV1D_FRAME_TYPE_SWITCH:
        frame->pict_type = AV_PICTURE_TYPE_SP;
        break;
    default:
        res = AVERROR_INVALIDDATA;
        /* ... */
    }
    if (p->mastering_display) {
        /* ... create mastering-display side data (elided) ... */
        for (int i = 0; i < 3; i++) {
            /* ... copy the three display primaries as AVRationals ... */
        }
        /* ... */
    }

    if (p->content_light) {
        /* "light" is the AVContentLightMetadata side data created just above (elided) */
        light->MaxCLL  = p->content_light->max_content_light_level;
        light->MaxFALL = p->content_light->max_frame_average_light_level;
    }
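A stand-alone sketch of the side-data attachment performed in this block, using the libavutil helper for the AVContentLightMetadata struct listed further below; the numeric values are placeholders for dav1d's p->content_light fields.

#include <errno.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/mastering_display_metadata.h>

static int example_attach_cll(AVFrame *frame)
{
    AVContentLightMetadata *light = av_content_light_metadata_create_side_data(frame);
    if (!light)
        return AVERROR(ENOMEM);
    light->MaxCLL  = 1000;   /* max content light level, cd/m^2       */
    light->MaxFALL = 400;    /* max frame-average light level, cd/m^2 */
    return 0;
}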
    /* ITU-T T.35 user data: look for ATSC A53 Part 4 closed captions */
    GetByteContext gb;
    unsigned int user_identifier;

    /* ... bytestream2_init() over the T.35 payload, skip the provider fields (elided) ... */
    user_identifier = bytestream2_get_be32(&gb);
    switch (user_identifier) {
    case MKBETAG('G', 'A', '9', '4'): {   /* "GA94" = ATSC user identifier */
        /* ... ff_parse_a53_cc() and attach AV_FRAME_DATA_A53_CC side data ... */
        break;
    }
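A toy demonstration of the helpers used here (GetByteContext, bytestream2_get_be32(), MKBETAG()): read a big-endian 32-bit identifier from a buffer and compare it with the "GA94" tag. The buffer layout is made up and not the real T.35 payload, and the function name is hypothetical.

#include "libavcodec/bytestream.h"   /* internal header, as used by this file */
#include "libavutil/common.h"        /* MKBETAG */

/* Return 1 if the buffer starts with the big-endian "GA94" identifier that
 * marks ATSC A53 closed captions. */
static int example_has_ga94_tag(const uint8_t *buf, int size)
{
    GetByteContext gb;

    bytestream2_init(&gb, buf, size);
    if (bytestream2_get_bytes_left(&gb) < 4)
        return 0;
    return bytestream2_get_be32(&gb) == MKBETAG('G', 'A', '9', '4');
}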
    if (p->frame_hdr->film_grain.present && (!dav1d->apply_grain ||
        (c->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN))) {
        /* ... create AVFilmGrainParams side data and set its type (elided) ... */
        fgp->seed = p->frame_hdr->film_grain.data.seed;
        /* ... copy the scaling points and AR coefficients (elided) ... */
        memcpy(&fgp->codec.aom.uv_mult, &p->frame_hdr->film_grain.data.uv_mult,
               sizeof(fgp->codec.aom.uv_mult));
        /* ... */
    }
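A minimal sketch of exporting AV1 film-grain metadata as frame side data, assuming the AVFilmGrainParams/AVFilmGrainAOMParams fields listed further below; the values are placeholders for dav1d's p->frame_hdr->film_grain.data, and the function name is made up.

#include <errno.h>
#include <libavutil/error.h>
#include <libavutil/film_grain_params.h>
#include <libavutil/frame.h>

static int example_export_film_grain(AVFrame *frame)
{
    AVFilmGrainParams *fgp = av_film_grain_params_create_side_data(frame);
    if (!fgp)
        return AVERROR(ENOMEM);

    fgp->type = AV_FILM_GRAIN_PARAMS_AV1;   /* the codec.aom union is valid */
    fgp->seed = 0x1234;                     /* placeholder grain seed       */
    fgp->codec.aom.num_y_points = 0;        /* no luma scaling points       */
    fgp->codec.aom.overlap_flag = 1;        /* overlap film-grain blocks    */
    fgp->codec.aom.uv_mult[0]   = 0;
    fgp->codec.aom.uv_mult[1]   = 0;
    return 0;
}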
    /* libdav1d_receive_frame() error path: drop the dav1d picture reference */
    dav1d_picture_unref(p);

/* libdav1d_close(): free any buffered data and tear down the decoder */
    dav1d_data_unref(&dav1d->data);
    /* ... av_buffer_pool_uninit() on the picture pool (elided) ... */
    dav1d_close(&dav1d->c);
#ifndef DAV1D_MAX_FRAME_THREADS
#define DAV1D_MAX_FRAME_THREADS DAV1D_MAX_THREADS
#endif
#ifndef DAV1D_MAX_TILE_THREADS
#define DAV1D_MAX_TILE_THREADS DAV1D_MAX_THREADS
#endif

#define OFFSET(x) offsetof(Libdav1dContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption libdav1d_options[] = {
    /* ... */
    { "oppoint", "Select an operating point of the scalable bitstream", OFFSET(operating_point),
      AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 31, VD },
    /* ... */
};
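An option table like the one above is normally exposed through a private AVClass that the decoder's priv_class points at. A plausible shape for the libdav1d_class referenced further below, assuming libavutil/opt.h and libavutil/log.h are in scope (the class_name string is an assumption):

static const AVClass libdav1d_class = {
    .class_name = "libdav1d decoder",     /* assumed name */
    .item_name  = av_default_item_name,
    .option     = libdav1d_options,
    .version    = LIBAVUTIL_VERSION_INT,
};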
    /* from the AVCodec ff_libdav1d_decoder definition */
    .wrapper_name = "libdav1d",
Referenced symbols (cross-reference briefs from the generated documentation):

void av_vlog(void *avcl, int level, const char *fmt, va_list vl)
Send the specified message to the log if the level is less than or equal to the current av_log_level.
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
#define FF_ENABLE_DEPRECATION_WARNINGS
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init function without locking any global mutexes.
AVPixelFormat
Pixel format.
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
static int libdav1d_picture_allocator(Dav1dPicture *p, void *cookie)
static av_cold int init(AVCodecContext *avctx)
AVColorTransferCharacteristic
Color Transfer Characteristic.
static const AVClass libdav1d_class
#define AVERROR_EOF
End of file.
uint8_t * data
The data buffer.
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
uint8_t uv_points[2][10][2]
unsigned MaxCLL
Max content light level (cd/m^2).
This structure describes decoded (raw) audio or video data.
@ AVCOL_RANGE_JPEG
Full range content.
#define AV_PIX_FMT_YUV420P10
static void libdav1d_user_data_free(const uint8_t *data, void *opaque)
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
AVColorPrimaries
Chromaticity coordinates of the source primaries.
uint64_t seed
Seed to use for the synthesis process, if the codec allows for it.
Content light level needed to transmit HDR over HDMI (CTA-861.3).
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
AVBufferPool * av_buffer_pool_init(buffer_size_t size, AVBufferRef *(*alloc)(buffer_size_t size))
Allocate and initialize a buffer pool.
int grain_scale_shift
Signals the down shift applied to the generated gaussian numbers during synthesis.
#define AV_PIX_FMT_GBRP10
static av_cold int libdav1d_init(AVCodecContext *c)
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
int limit_output_range
Signals to clip to limited color levels after film grain application.
int num_y_points
Number of points, and the scale and value for each point of the piecewise linear scaling function for the luma plane.
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
#define AV_PIX_FMT_YUV444P10
#define DAV1D_MAX_FRAME_THREADS
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
#define AV_CODEC_CAP_OTHER_THREADS
Codec supports multithreading through a method other than slice- or frame-level multithreading.
AVFilmGrainParams * av_film_grain_params_create_side_data(AVFrame *frame)
Allocate a complete AVFilmGrainParams and add it to the frame.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define AV_PIX_FMT_GRAY10
int ff_parse_a53_cc(AVBufferRef **pbuf, const uint8_t *data, int size)
Parse a data array for ATSC A53 Part 4 Closed Captions and store them in an AVBufferRef.
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
static void flush(AVCodecContext *avctx)
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
@ AVCHROMA_LOC_TOPLEFT
ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2.
static CopyRet receive_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame)
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PICTURE_TYPE_I
Intra.
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
static void libdav1d_picture_release(Dav1dPicture *p, void *cookie)
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
#define AV_PIX_FMT_YUV422P10
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
@ AV_PICTURE_TYPE_SP
Switching Predicted.
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
int num_uv_points[2]
If chroma_scaling_from_luma is set to 0, signals the chroma scaling function parameters.
#define FF_CODEC_CAP_AUTO_THREADS
Codec handles avctx->thread_count == 0 (auto) internally.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4], const uint8_t *src, enum AVPixelFormat pix_fmt, int width, int height, int align)
Setup the data pointers and linesizes based on the specified image parameters and the provided array.
static void libdav1d_log_callback(void *opaque, const char *fmt, va_list vl)
static enum AVPixelFormat pix_fmt_rgb[3]
static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame)
#define AV_PIX_FMT_YUV422P12
static AVRational av_make_q(int num, int den)
Create an AVRational.
#define AV_NOPTS_VALUE
Undefined timestamp value.
#define AV_PIX_FMT_YUV444P12
#define MKBETAG(a, b, c, d)
This structure describes how to handle film grain synthesis in video for specific codecs.
int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align)
Return the size in bytes of the amount of data required to store an image with the given parameters.
AVCodec ff_libdav1d_decoder
int8_t ar_coeffs_y[24]
Luma auto-regression coefficients.
#define AV_LOG_INFO
Standard information.
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
#define AV_PIX_FMT_GBRP12
AVColorSpace
YUV colorspace type.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
int scaling_shift
Specifies the shift applied to the chroma components.
const char * name
Name of the codec implementation.
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
#define AV_PIX_FMT_YUV420P12
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
main external API structure.
int ar_coeff_lag
Specifies the auto-regression lag.
int uv_offset[2]
Offset used for component scaling function.
static enum AVPixelFormat pix_fmt[][3]
int uv_mult[2]
Specifies the luma/chroma multipliers for the index to the component scaling function.
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
#define FF_DISABLE_DEPRECATION_WARNINGS
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
int overlap_flag
Signals whether to overlap film grain blocks.
static const AVOption libdav1d_options[]
@ AV_PICTURE_TYPE_P
Predicted.
static void libdav1d_flush(AVCodecContext *c)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
A reference to a data buffer.
static void libdav1d_data_free(const uint8_t *data, void *opaque)
union AVFilmGrainParams::@294 codec
Additional fields may be added both here and in any structure included.
unsigned MaxFALL
Max average light level per frame (cd/m^2).
This structure stores compressed data.
int64_t pos
byte position in stream, -1 if unknown
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define AV_PIX_FMT_GRAY12
int chroma_scaling_from_luma
Signals whether to derive the chroma scaling function from the luma.
@ AV_FILM_GRAIN_PARAMS_AV1
The union is valid when interpreted as AVFilmGrainAOMParams (codec.aom)
enum AVFilmGrainParamsType type
Specifies the codec for which this structure is valid.
#define DAV1D_MAX_TILE_THREADS
#define AV_CODEC_EXPORT_DATA_FILM_GRAIN
Decoding only.
int ar_coeff_shift
Specifies the range of the auto-regressive coefficients.
static av_cold int libdav1d_close(AVCodecContext *c)
#define AV_OPT_FLAG_DEPRECATED
set if option is deprecated, users should refer to AVOption.help text for more information
int8_t ar_coeffs_uv[2][25]
Chroma auto-regression coefficients.