vaapi_av1.c
/*
 * AV1 HW decode acceleration through VA API
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/frame.h"
#include "libavutil/pixdesc.h"
#include "hwconfig.h"
#include "vaapi_decode.h"
#include "internal.h"
#include "av1dec.h"
#include "thread.h"

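/* One slot of the decoder-side reference table: the AVFrame backing the slot
 * and a flag telling whether this table (rather than the decoder's native
 * reference list) currently supplies the surface for that slot. */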
typedef struct VAAPIAV1FrameRef {
    AVFrame *frame;
    int valid;
} VAAPIAV1FrameRef;

typedef struct VAAPIAV1DecContext {
    VAAPIDecodeContext base;

    /**
     * In the film grain case, VAAPI produces two outputs per frame:
     * current_frame does not have film grain applied and is used as a
     * reference for subsequent frames, while current_display_picture has
     * film grain applied and is pushed downstream. The reference list
     * maintained here therefore holds the grain-free surfaces.
     */
    VAAPIAV1FrameRef ref_tab[AV1_NUM_REF_FRAMES];
    AVFrame *tmp_frame;
} VAAPIAV1DecContext;

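/* Return the VA surface backing an AV1Frame, or VA_INVALID_SURFACE if there
 * is no frame. */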
static VASurfaceID vaapi_av1_surface_id(AV1Frame *vf)
{
    if (vf)
        return ff_vaapi_get_surface_id(vf->f);
    else
        return VA_INVALID_SURFACE;
}

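/* Map the sequence header's profile and bit-depth flags to the VA-API
 * bit_depth_idx: 0 for 8-bit, 1 for 10-bit, 2 for 12-bit; negative on
 * unsupported profiles. */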
static int8_t vaapi_av1_get_bit_depth_idx(AVCodecContext *avctx)
{
    AV1DecContext *s = avctx->priv_data;
    const AV1RawSequenceHeader *seq = s->raw_seq;
    int8_t bit_depth = 8;

    if (seq->seq_profile == 2 && seq->color_config.high_bitdepth)
        bit_depth = seq->color_config.twelve_bit ? 12 : 10;
    else if (seq->seq_profile <= 2)
        bit_depth = seq->color_config.high_bitdepth ? 10 : 8;
    else {
        av_log(avctx, AV_LOG_ERROR,
               "Couldn't get bit depth from profile:%d.\n", seq->seq_profile);
        return -1;
    }
    return bit_depth == 8 ? 0 : bit_depth == 10 ? 1 : 2;
}

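/* Allocate the temporary decode target used when the driver applies film
 * grain, plus one AVFrame per reference-table slot, then run the common
 * VAAPI decoder initialization. */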
static int vaapi_av1_decode_init(AVCodecContext *avctx)
{
    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;

    ctx->tmp_frame = av_frame_alloc();
    if (!ctx->tmp_frame) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to allocate frame.\n");
        return AVERROR(ENOMEM);
    }

    for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++) {
        ctx->ref_tab[i].frame = av_frame_alloc();
        if (!ctx->ref_tab[i].frame) {
            av_log(avctx, AV_LOG_ERROR,
                   "Failed to allocate reference table frame %d.\n", i);
            return AVERROR(ENOMEM);
        }
        ctx->ref_tab[i].valid = 0;
    }

    return ff_vaapi_decode_init(avctx);
}

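/* Release whatever the temporary frame and the reference table still hold,
 * free the frames, and tear down the common VAAPI decoder state. */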
static int vaapi_av1_decode_uninit(AVCodecContext *avctx)
{
    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;

    if (ctx->tmp_frame->buf[0])
        ff_thread_release_buffer(avctx, ctx->tmp_frame);
    av_frame_free(&ctx->tmp_frame);

    for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++) {
        if (ctx->ref_tab[i].frame->buf[0])
            ff_thread_release_buffer(avctx, ctx->ref_tab[i].frame);
        av_frame_free(&ctx->ref_tab[i].frame);
    }

    return ff_vaapi_decode_uninit(avctx);
}

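/* Fill and submit the AV1 picture parameter buffer for the current frame.
 * When the driver applies film grain, the grain-free reconstruction is
 * written to tmp_frame (the surface later stored as a reference) while the
 * current AV1Frame receives the grained display picture. */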
static int vaapi_av1_start_frame(AVCodecContext *avctx,
                                 av_unused const uint8_t *buffer,
                                 av_unused uint32_t size)
{
    AV1DecContext *s = avctx->priv_data;
    const AV1RawSequenceHeader *seq = s->raw_seq;
    const AV1RawFrameHeader *frame_header = s->raw_frame_header;
    const AV1RawFilmGrainParams *film_grain = &s->cur_frame.film_grain;
    VAAPIDecodePicture *pic = s->cur_frame.hwaccel_picture_private;
    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;
    VADecPictureParameterBufferAV1 pic_param;
    int8_t bit_depth_idx;
    int err = 0;
    int apply_grain = !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) && film_grain->apply_grain;
    uint8_t remap_lr_type[4] = {AV1_RESTORE_NONE, AV1_RESTORE_SWITCHABLE,
                                AV1_RESTORE_WIENER, AV1_RESTORE_SGRPROJ};
    uint8_t segmentation_feature_signed[AV1_SEG_LVL_MAX] = {1, 1, 1, 1, 1, 0, 0, 0};
    uint8_t segmentation_feature_max[AV1_SEG_LVL_MAX] = {255, AV1_MAX_LOOP_FILTER,
                                                         AV1_MAX_LOOP_FILTER, AV1_MAX_LOOP_FILTER,
                                                         AV1_MAX_LOOP_FILTER, 7, 0, 0};

    bit_depth_idx = vaapi_av1_get_bit_depth_idx(avctx);
    if (bit_depth_idx < 0)
        goto fail;

    if (apply_grain) {
        if (ctx->tmp_frame->buf[0])
            ff_thread_release_buffer(avctx, ctx->tmp_frame);
        err = ff_thread_get_buffer(avctx, ctx->tmp_frame, AV_GET_BUFFER_FLAG_REF);
        if (err < 0)
            goto fail;
        pic->output_surface = ff_vaapi_get_surface_id(ctx->tmp_frame);
    } else {
        pic->output_surface = vaapi_av1_surface_id(&s->cur_frame);
    }

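    /* Translate the sequence and frame headers into the VA-API AV1 picture
     * parameters. */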
    memset(&pic_param, 0, sizeof(VADecPictureParameterBufferAV1));
    pic_param = (VADecPictureParameterBufferAV1) {
        .profile                    = seq->seq_profile,
        .order_hint_bits_minus_1    = seq->order_hint_bits_minus_1,
        .bit_depth_idx              = bit_depth_idx,
        .matrix_coefficients        = seq->color_config.matrix_coefficients,
        .current_frame              = pic->output_surface,
        .current_display_picture    = vaapi_av1_surface_id(&s->cur_frame),
        .frame_width_minus1         = frame_header->frame_width_minus_1,
        .frame_height_minus1        = frame_header->frame_height_minus_1,
        .primary_ref_frame          = frame_header->primary_ref_frame,
        .order_hint                 = frame_header->order_hint,
        .tile_cols                  = frame_header->tile_cols,
        .tile_rows                  = frame_header->tile_rows,
        .context_update_tile_id     = frame_header->context_update_tile_id,
        .superres_scale_denominator = frame_header->use_superres ?
                                          frame_header->coded_denom + AV1_SUPERRES_DENOM_MIN :
                                          AV1_SUPERRES_NUM,
        .interp_filter              = frame_header->interpolation_filter,
        .filter_level[0]            = frame_header->loop_filter_level[0],
        .filter_level[1]            = frame_header->loop_filter_level[1],
        .filter_level_u             = frame_header->loop_filter_level[2],
        .filter_level_v             = frame_header->loop_filter_level[3],
        .base_qindex                = frame_header->base_q_idx,
        .y_dc_delta_q               = frame_header->delta_q_y_dc,
        .u_dc_delta_q               = frame_header->delta_q_u_dc,
        .u_ac_delta_q               = frame_header->delta_q_u_ac,
        .v_dc_delta_q               = frame_header->delta_q_v_dc,
        .v_ac_delta_q               = frame_header->delta_q_v_ac,
        .cdef_damping_minus_3       = frame_header->cdef_damping_minus_3,
        .cdef_bits                  = frame_header->cdef_bits,
        .seq_info_fields.fields = {
            .still_picture              = seq->still_picture,
            .use_128x128_superblock     = seq->use_128x128_superblock,
            .enable_filter_intra        = seq->enable_filter_intra,
            .enable_intra_edge_filter   = seq->enable_intra_edge_filter,
            .enable_interintra_compound = seq->enable_interintra_compound,
            .enable_masked_compound     = seq->enable_masked_compound,
            .enable_dual_filter         = seq->enable_dual_filter,
            .enable_order_hint          = seq->enable_order_hint,
            .enable_jnt_comp            = seq->enable_jnt_comp,
            .enable_cdef                = seq->enable_cdef,
            .mono_chrome                = seq->color_config.mono_chrome,
            .color_range                = seq->color_config.color_range,
            .subsampling_x              = seq->color_config.subsampling_x,
            .subsampling_y              = seq->color_config.subsampling_y,
            .chroma_sample_position     = seq->color_config.chroma_sample_position,
            .film_grain_params_present  = seq->film_grain_params_present &&
                                          !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN),
        },
        .seg_info.segment_info_fields.bits = {
            .enabled         = frame_header->segmentation_enabled,
            .update_map      = frame_header->segmentation_update_map,
            .temporal_update = frame_header->segmentation_temporal_update,
            .update_data     = frame_header->segmentation_update_data,
        },
        .film_grain_info = {
            .film_grain_info_fields.bits = {
                .apply_grain              = apply_grain,
                .chroma_scaling_from_luma = film_grain->chroma_scaling_from_luma,
                .grain_scaling_minus_8    = film_grain->grain_scaling_minus_8,
                .ar_coeff_lag             = film_grain->ar_coeff_lag,
                .ar_coeff_shift_minus_6   = film_grain->ar_coeff_shift_minus_6,
                .grain_scale_shift        = film_grain->grain_scale_shift,
                .overlap_flag             = film_grain->overlap_flag,
                .clip_to_restricted_range = film_grain->clip_to_restricted_range,
            },
            .grain_seed    = film_grain->grain_seed,
            .num_y_points  = film_grain->num_y_points,
            .num_cb_points = film_grain->num_cb_points,
            .num_cr_points = film_grain->num_cr_points,
            .cb_mult       = film_grain->cb_mult,
            .cb_luma_mult  = film_grain->cb_luma_mult,
            .cb_offset     = film_grain->cb_offset,
            .cr_mult       = film_grain->cr_mult,
            .cr_luma_mult  = film_grain->cr_luma_mult,
            .cr_offset     = film_grain->cr_offset,
        },
        .pic_info_fields.bits = {
            .frame_type                   = frame_header->frame_type,
            .show_frame                   = frame_header->show_frame,
            .showable_frame               = frame_header->showable_frame,
            .error_resilient_mode         = frame_header->error_resilient_mode,
            .disable_cdf_update           = frame_header->disable_cdf_update,
            .allow_screen_content_tools   = frame_header->allow_screen_content_tools,
            .force_integer_mv             = frame_header->force_integer_mv,
            .allow_intrabc                = frame_header->allow_intrabc,
            .use_superres                 = frame_header->use_superres,
            .allow_high_precision_mv      = frame_header->allow_high_precision_mv,
            .is_motion_mode_switchable    = frame_header->is_motion_mode_switchable,
            .use_ref_frame_mvs            = frame_header->use_ref_frame_mvs,
            .disable_frame_end_update_cdf = frame_header->disable_frame_end_update_cdf,
            .uniform_tile_spacing_flag    = frame_header->uniform_tile_spacing_flag,
            .allow_warped_motion          = frame_header->allow_warped_motion,
        },
        .loop_filter_info_fields.bits = {
            .sharpness_level        = frame_header->loop_filter_sharpness,
            .mode_ref_delta_enabled = frame_header->loop_filter_delta_enabled,
            .mode_ref_delta_update  = frame_header->loop_filter_delta_update,
        },
        .mode_control_fields.bits = {
            .delta_q_present_flag  = frame_header->delta_q_present,
            .log2_delta_q_res      = frame_header->delta_q_res,
            .delta_lf_present_flag = frame_header->delta_lf_present,
            .log2_delta_lf_res     = frame_header->delta_lf_res,
            .delta_lf_multi        = frame_header->delta_lf_multi,
            .tx_mode               = frame_header->tx_mode,
            .reference_select      = frame_header->reference_select,
            .reduced_tx_set_used   = frame_header->reduced_tx_set,
            .skip_mode_present     = frame_header->skip_mode_present,
        },
        .loop_restoration_fields.bits = {
            .yframe_restoration_type  = remap_lr_type[frame_header->lr_type[0]],
            .cbframe_restoration_type = remap_lr_type[frame_header->lr_type[1]],
            .crframe_restoration_type = remap_lr_type[frame_header->lr_type[2]],
            .lr_unit_shift            = frame_header->lr_unit_shift,
            .lr_uv_shift              = frame_header->lr_uv_shift,
        },
        .qmatrix_fields.bits = {
            .using_qmatrix = frame_header->using_qmatrix,
            .qm_y          = frame_header->qm_y,
            .qm_u          = frame_header->qm_u,
            .qm_v          = frame_header->qm_v,
        }
    };

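    /* On a shown keyframe the reference state is reset, so all ref_frame_map
     * entries are signalled as invalid; otherwise prefer the grain-free copy
     * kept in ref_tab and fall back to the decoder's own reference frames. */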
    for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
        if (pic_param.pic_info_fields.bits.frame_type == AV1_FRAME_KEY && frame_header->show_frame)
            pic_param.ref_frame_map[i] = VA_INVALID_ID;
        else
            pic_param.ref_frame_map[i] = ctx->ref_tab[i].valid ?
                                         ff_vaapi_get_surface_id(ctx->ref_tab[i].frame) :
                                         vaapi_av1_surface_id(&s->ref[i]);
    }
    for (int i = 0; i < AV1_REFS_PER_FRAME; i++) {
        pic_param.ref_frame_idx[i] = frame_header->ref_frame_idx[i];
    }
    for (int i = 0; i < AV1_TOTAL_REFS_PER_FRAME; i++) {
        pic_param.ref_deltas[i] = frame_header->loop_filter_ref_deltas[i];
    }
    for (int i = 0; i < 2; i++) {
        pic_param.mode_deltas[i] = frame_header->loop_filter_mode_deltas[i];
    }
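    /* Each CDEF preset packs the primary strength in the upper bits and the
     * two-bit secondary strength in the lower bits. */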
    for (int i = 0; i < (1 << frame_header->cdef_bits); i++) {
        pic_param.cdef_y_strengths[i] =
            (frame_header->cdef_y_pri_strength[i] << 2) +
             frame_header->cdef_y_sec_strength[i];
        pic_param.cdef_uv_strengths[i] =
            (frame_header->cdef_uv_pri_strength[i] << 2) +
             frame_header->cdef_uv_sec_strength[i];
    }
    for (int i = 0; i < frame_header->tile_cols; i++) {
        pic_param.width_in_sbs_minus_1[i] =
            frame_header->width_in_sbs_minus_1[i];
    }
    for (int i = 0; i < frame_header->tile_rows; i++) {
        pic_param.height_in_sbs_minus_1[i] =
            frame_header->height_in_sbs_minus_1[i];
    }
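    /* Global motion parameters for LAST..ALTREF; wm[] is zero-based while the
     * AV1 reference-frame enum starts at 1, hence the i - 1 indexing. */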
    for (int i = AV1_REF_FRAME_LAST; i <= AV1_REF_FRAME_ALTREF; i++) {
        pic_param.wm[i - 1].invalid = s->cur_frame.gm_invalid[i];
        pic_param.wm[i - 1].wmtype  = s->cur_frame.gm_type[i];
        for (int j = 0; j < 6; j++)
            pic_param.wm[i - 1].wmmat[j] = s->cur_frame.gm_params[i][j];
    }
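    /* Per-segment feature mask and values; signed features are clipped to
     * [-max, max], unsigned ones to [0, max]. */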
    for (int i = 0; i < AV1_MAX_SEGMENTS; i++) {
        for (int j = 0; j < AV1_SEG_LVL_MAX; j++) {
            pic_param.seg_info.feature_mask[i] |= (frame_header->feature_enabled[i][j] << j);
            if (segmentation_feature_signed[j])
                pic_param.seg_info.feature_data[i][j] = av_clip(frame_header->feature_value[i][j],
                                                                -segmentation_feature_max[j], segmentation_feature_max[j]);
            else
                pic_param.seg_info.feature_data[i][j] = av_clip(frame_header->feature_value[i][j],
                                                                0, segmentation_feature_max[j]);
        }
    }
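    /* The film grain scaling points and AR coefficients (carried with a +128
     * bias in the bitstream) are only passed when the driver applies grain. */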
    if (apply_grain) {
        for (int i = 0; i < film_grain->num_y_points; i++) {
            pic_param.film_grain_info.point_y_value[i] =
                film_grain->point_y_value[i];
            pic_param.film_grain_info.point_y_scaling[i] =
                film_grain->point_y_scaling[i];
        }
        for (int i = 0; i < film_grain->num_cb_points; i++) {
            pic_param.film_grain_info.point_cb_value[i] =
                film_grain->point_cb_value[i];
            pic_param.film_grain_info.point_cb_scaling[i] =
                film_grain->point_cb_scaling[i];
        }
        for (int i = 0; i < film_grain->num_cr_points; i++) {
            pic_param.film_grain_info.point_cr_value[i] =
                film_grain->point_cr_value[i];
            pic_param.film_grain_info.point_cr_scaling[i] =
                film_grain->point_cr_scaling[i];
        }
        for (int i = 0; i < 24; i++) {
            pic_param.film_grain_info.ar_coeffs_y[i] =
                film_grain->ar_coeffs_y_plus_128[i] - 128;
        }
        for (int i = 0; i < 25; i++) {
            pic_param.film_grain_info.ar_coeffs_cb[i] =
                film_grain->ar_coeffs_cb_plus_128[i] - 128;
            pic_param.film_grain_info.ar_coeffs_cr[i] =
                film_grain->ar_coeffs_cr_plus_128[i] - 128;
        }
    }
    err = ff_vaapi_decode_make_param_buffer(avctx, pic,
                                            VAPictureParameterBufferType,
                                            &pic_param, sizeof(pic_param));
    if (err < 0)
        goto fail;

    return 0;

fail:
    ff_vaapi_decode_cancel(avctx, pic);
    return err;
}

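/* Issue the queued decode, then update the reference table according to
 * refresh_frame_flags: with film grain the grain-free tmp_frame becomes the
 * stored reference, otherwise the slot is marked as not held here. */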
static int vaapi_av1_end_frame(AVCodecContext *avctx)
{
    const AV1DecContext *s = avctx->priv_data;
    const AV1RawFrameHeader *header = s->raw_frame_header;
    const AV1RawFilmGrainParams *film_grain = &s->cur_frame.film_grain;
    VAAPIDecodePicture *pic = s->cur_frame.hwaccel_picture_private;
    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;

    int apply_grain = !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) && film_grain->apply_grain;
    int ret;
    ret = ff_vaapi_decode_issue(avctx, pic);
    if (ret < 0)
        return ret;

    for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
        if (header->refresh_frame_flags & (1 << i)) {
            if (ctx->ref_tab[i].frame->buf[0])
                ff_thread_release_buffer(avctx, ctx->ref_tab[i].frame);

            if (apply_grain) {
                ret = av_frame_ref(ctx->ref_tab[i].frame, ctx->tmp_frame);
                if (ret < 0)
                    return ret;
                ctx->ref_tab[i].valid = 1;
            } else {
                ctx->ref_tab[i].valid = 0;
            }
        }
    }

    return 0;
}

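/* Submit one slice parameter buffer for each tile covered by the current
 * tile group, all referencing the same input bitstream buffer. */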
static int vaapi_av1_decode_slice(AVCodecContext *avctx,
                                  const uint8_t *buffer,
                                  uint32_t size)
{
    const AV1DecContext *s = avctx->priv_data;
    VAAPIDecodePicture *pic = s->cur_frame.hwaccel_picture_private;
    VASliceParameterBufferAV1 slice_param;
    int err = 0;

    for (int i = s->tg_start; i <= s->tg_end; i++) {
        memset(&slice_param, 0, sizeof(VASliceParameterBufferAV1));

        slice_param = (VASliceParameterBufferAV1) {
            .slice_data_size   = s->tile_group_info[i].tile_size,
            .slice_data_offset = s->tile_group_info[i].tile_offset,
            .slice_data_flag   = VA_SLICE_DATA_FLAG_ALL,
            .tile_row          = s->tile_group_info[i].tile_row,
            .tile_column       = s->tile_group_info[i].tile_column,
            .tg_start          = s->tg_start,
            .tg_end            = s->tg_end,
        };

        err = ff_vaapi_decode_make_slice_buffer(avctx, pic, &slice_param,
                                                sizeof(VASliceParameterBufferAV1),
                                                buffer,
                                                size);
        if (err) {
            ff_vaapi_decode_cancel(avctx, pic);
            return err;
        }
    }

    return 0;
}

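/* Hardware accelerator descriptor registered for AV1 decoding via VA-API. */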
const AVHWAccel ff_av1_vaapi_hwaccel = {
    .name                 = "av1_vaapi",
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_AV1,
    .pix_fmt              = AV_PIX_FMT_VAAPI,
    .start_frame          = vaapi_av1_start_frame,
    .end_frame            = vaapi_av1_end_frame,
    .decode_slice         = vaapi_av1_decode_slice,
    .frame_priv_data_size = sizeof(VAAPIDecodePicture),
    .init                 = vaapi_av1_decode_init,
    .uninit               = vaapi_av1_decode_uninit,
    .frame_params         = ff_vaapi_common_frame_params,
    .priv_data_size       = sizeof(VAAPIAV1DecContext),
    .caps_internal        = HWACCEL_CAP_ASYNC_SAFE,
};