1 /*
2  * HEVC video Decoder
3  *
4  * Copyright (C) 2012 - 2013 Guillaume Martres
5  * Copyright (C) 2012 - 2013 Mickael Raulet
6  * Copyright (C) 2012 - 2013 Gildas Cocherel
7  * Copyright (C) 2012 - 2013 Wassim Hamidouche
8  *
9  * This file is part of FFmpeg.
10  *
11  * FFmpeg is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * FFmpeg is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with FFmpeg; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  */
25 
26 #include "config_components.h"
27 
28 #include "libavutil/attributes.h"
29 #include "libavutil/avstring.h"
30 #include "libavutil/common.h"
32 #include "libavutil/internal.h"
33 #include "libavutil/md5.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/timecode.h"
37 
38 #include "bswapdsp.h"
39 #include "cabac_functions.h"
40 #include "codec_internal.h"
41 #include "decode.h"
42 #include "golomb.h"
43 #include "hevc.h"
44 #include "hevc_parse.h"
45 #include "hevcdec.h"
46 #include "hwaccel_internal.h"
47 #include "hwconfig.h"
48 #include "internal.h"
49 #include "profiles.h"
50 #include "refstruct.h"
51 #include "thread.h"
52 #include "threadframe.h"
53 
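/* Maps a prediction block width (2..64) to the index used to select the
 * width-specific motion-compensation function from the hevcdsp tables
 * (see the put_hevc_qpel and put_hevc_epel lookups in the MC helpers below). */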
54 static const uint8_t hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
55 
56 /**
57  * NOTE: Each function hls_foo corresponds to the function foo in the
58  * specification (HLS stands for High Level Syntax).
59  */
60 
61 /**
62  * Section 5.7
63  */
64 
65 /* free everything allocated by pic_arrays_init() */
66 static void pic_arrays_free(HEVCContext *s)
67 {
68  av_freep(&s->sao);
69  av_freep(&s->deblock);
70 
71  av_freep(&s->skip_flag);
72  av_freep(&s->tab_ct_depth);
73 
74  av_freep(&s->tab_ipm);
75  av_freep(&s->cbf_luma);
76  av_freep(&s->is_pcm);
77 
78  av_freep(&s->qp_y_tab);
79  av_freep(&s->tab_slice_address);
80  av_freep(&s->filter_slice_edges);
81 
82  av_freep(&s->horizontal_bs);
83  av_freep(&s->vertical_bs);
84 
85  av_freep(&s->sh.entry_point_offset);
86  av_freep(&s->sh.size);
87  av_freep(&s->sh.offset);
88 
89  ff_refstruct_pool_uninit(&s->tab_mvf_pool);
90  ff_refstruct_pool_uninit(&s->rpl_tab_pool);
91 }
92 
93 /* allocate arrays that depend on frame dimensions */
94 static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
95 {
96  int log2_min_cb_size = sps->log2_min_cb_size;
97  int width = sps->width;
98  int height = sps->height;
99  int pic_size_in_ctb = ((width >> log2_min_cb_size) + 1) *
100  ((height >> log2_min_cb_size) + 1);
101  int ctb_count = sps->ctb_width * sps->ctb_height;
102  int min_pu_size = sps->min_pu_width * sps->min_pu_height;
103 
104  s->bs_width = (width >> 2) + 1;
105  s->bs_height = (height >> 2) + 1;
106 
107  s->sao = av_calloc(ctb_count, sizeof(*s->sao));
108  s->deblock = av_calloc(ctb_count, sizeof(*s->deblock));
109  if (!s->sao || !s->deblock)
110  goto fail;
111 
112  s->skip_flag = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
113  s->tab_ct_depth = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
114  if (!s->skip_flag || !s->tab_ct_depth)
115  goto fail;
116 
117  s->cbf_luma = av_malloc_array(sps->min_tb_width, sps->min_tb_height);
118  s->tab_ipm = av_mallocz(min_pu_size);
119  s->is_pcm = av_malloc_array(sps->min_pu_width + 1, sps->min_pu_height + 1);
120  if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
121  goto fail;
122 
123  s->filter_slice_edges = av_mallocz(ctb_count);
124  s->tab_slice_address = av_malloc_array(pic_size_in_ctb,
125  sizeof(*s->tab_slice_address));
126  s->qp_y_tab = av_malloc_array(pic_size_in_ctb,
127  sizeof(*s->qp_y_tab));
128  if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address)
129  goto fail;
130 
131  s->horizontal_bs = av_calloc(s->bs_width, s->bs_height);
132  s->vertical_bs = av_calloc(s->bs_width, s->bs_height);
133  if (!s->horizontal_bs || !s->vertical_bs)
134  goto fail;
135 
136  s->tab_mvf_pool = ff_refstruct_pool_alloc(min_pu_size * sizeof(MvField), 0);
137  s->rpl_tab_pool = ff_refstruct_pool_alloc(ctb_count * sizeof(RefPicListTab), 0);
138  if (!s->tab_mvf_pool || !s->rpl_tab_pool)
139  goto fail;
140 
141  return 0;
142 
143 fail:
144  pic_arrays_free(s);
145  return AVERROR(ENOMEM);
146 }
147 
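/* Parse the pred_weight_table() slice-header syntax: explicit luma/chroma
 * weights and offsets for reference list L0 and, for B slices, L1. */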
148 static int pred_weight_table(HEVCContext *s, GetBitContext *gb)
149 {
150  int i = 0;
151  int j = 0;
152  uint8_t luma_weight_l0_flag[16];
153  uint8_t chroma_weight_l0_flag[16];
154  uint8_t luma_weight_l1_flag[16];
155  uint8_t chroma_weight_l1_flag[16];
156  int luma_log2_weight_denom;
157 
158  luma_log2_weight_denom = get_ue_golomb_long(gb);
159  if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7) {
160  av_log(s->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is invalid\n", luma_log2_weight_denom);
161  return AVERROR_INVALIDDATA;
162  }
163  s->sh.luma_log2_weight_denom = av_clip_uintp2(luma_log2_weight_denom, 3);
164  if (s->ps.sps->chroma_format_idc != 0) {
165  int64_t chroma_log2_weight_denom = luma_log2_weight_denom + (int64_t)get_se_golomb(gb);
166  if (chroma_log2_weight_denom < 0 || chroma_log2_weight_denom > 7) {
167  av_log(s->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %"PRId64" is invalid\n", chroma_log2_weight_denom);
168  return AVERROR_INVALIDDATA;
169  }
170  s->sh.chroma_log2_weight_denom = chroma_log2_weight_denom;
171  }
172 
173  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
174  luma_weight_l0_flag[i] = get_bits1(gb);
175  if (!luma_weight_l0_flag[i]) {
176  s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom;
177  s->sh.luma_offset_l0[i] = 0;
178  }
179  }
180  if (s->ps.sps->chroma_format_idc != 0) {
181  for (i = 0; i < s->sh.nb_refs[L0]; i++)
182  chroma_weight_l0_flag[i] = get_bits1(gb);
183  } else {
184  for (i = 0; i < s->sh.nb_refs[L0]; i++)
185  chroma_weight_l0_flag[i] = 0;
186  }
187  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
188  if (luma_weight_l0_flag[i]) {
189  int delta_luma_weight_l0 = get_se_golomb(gb);
190  if ((int8_t)delta_luma_weight_l0 != delta_luma_weight_l0)
191  return AVERROR_INVALIDDATA;
192  s->sh.luma_weight_l0[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l0;
193  s->sh.luma_offset_l0[i] = get_se_golomb(gb);
194  }
195  if (chroma_weight_l0_flag[i]) {
196  for (j = 0; j < 2; j++) {
197  int delta_chroma_weight_l0 = get_se_golomb(gb);
198  int delta_chroma_offset_l0 = get_se_golomb(gb);
199 
200  if ( (int8_t)delta_chroma_weight_l0 != delta_chroma_weight_l0
201  || delta_chroma_offset_l0 < -(1<<17) || delta_chroma_offset_l0 > (1<<17)) {
202  return AVERROR_INVALIDDATA;
203  }
204 
205  s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0;
206  s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j])
207  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
208  }
209  } else {
210  s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom;
211  s->sh.chroma_offset_l0[i][0] = 0;
212  s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom;
213  s->sh.chroma_offset_l0[i][1] = 0;
214  }
215  }
216  if (s->sh.slice_type == HEVC_SLICE_B) {
217  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
218  luma_weight_l1_flag[i] = get_bits1(gb);
219  if (!luma_weight_l1_flag[i]) {
220  s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom;
221  s->sh.luma_offset_l1[i] = 0;
222  }
223  }
224  if (s->ps.sps->chroma_format_idc != 0) {
225  for (i = 0; i < s->sh.nb_refs[L1]; i++)
226  chroma_weight_l1_flag[i] = get_bits1(gb);
227  } else {
228  for (i = 0; i < s->sh.nb_refs[L1]; i++)
229  chroma_weight_l1_flag[i] = 0;
230  }
231  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
232  if (luma_weight_l1_flag[i]) {
233  int delta_luma_weight_l1 = get_se_golomb(gb);
234  if ((int8_t)delta_luma_weight_l1 != delta_luma_weight_l1)
235  return AVERROR_INVALIDDATA;
236  s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1;
237  s->sh.luma_offset_l1[i] = get_se_golomb(gb);
238  }
239  if (chroma_weight_l1_flag[i]) {
240  for (j = 0; j < 2; j++) {
241  int delta_chroma_weight_l1 = get_se_golomb(gb);
242  int delta_chroma_offset_l1 = get_se_golomb(gb);
243 
244  if ( (int8_t)delta_chroma_weight_l1 != delta_chroma_weight_l1
245  || delta_chroma_offset_l1 < -(1<<17) || delta_chroma_offset_l1 > (1<<17)) {
246  return AVERROR_INVALIDDATA;
247  }
248 
249  s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l1;
250  s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j])
251  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
252  }
253  } else {
254  s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom;
255  s->sh.chroma_offset_l1[i][0] = 0;
256  s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom;
257  s->sh.chroma_offset_l1[i][1] = 0;
258  }
259  }
260  }
261  return 0;
262 }
263 
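/* Decode the long-term reference picture set signalled in the slice header,
 * combining entries taken from the SPS (lt_idx_sps) with entries coded
 * directly in the slice header, and reconstructing full POC values when
 * delta_poc_msb is present. */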
264 static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
265 {
266  const HEVCSPS *sps = s->ps.sps;
267  int max_poc_lsb = 1 << sps->log2_max_poc_lsb;
268  int prev_delta_msb = 0;
269  unsigned int nb_sps = 0, nb_sh;
270  int i;
271 
272  rps->nb_refs = 0;
273  if (!sps->long_term_ref_pics_present_flag)
274  return 0;
275 
276  if (sps->num_long_term_ref_pics_sps > 0)
277  nb_sps = get_ue_golomb_long(gb);
278  nb_sh = get_ue_golomb_long(gb);
279 
280  if (nb_sps > sps->num_long_term_ref_pics_sps)
281  return AVERROR_INVALIDDATA;
282  if (nb_sh + (uint64_t)nb_sps > FF_ARRAY_ELEMS(rps->poc))
283  return AVERROR_INVALIDDATA;
284 
285  rps->nb_refs = nb_sh + nb_sps;
286 
287  for (i = 0; i < rps->nb_refs; i++) {
288 
289  if (i < nb_sps) {
290  uint8_t lt_idx_sps = 0;
291 
292  if (sps->num_long_term_ref_pics_sps > 1)
293  lt_idx_sps = get_bits(gb, av_ceil_log2(sps->num_long_term_ref_pics_sps));
294 
295  rps->poc[i] = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps];
296  rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps];
297  } else {
298  rps->poc[i] = get_bits(gb, sps->log2_max_poc_lsb);
299  rps->used[i] = get_bits1(gb);
300  }
301 
302  rps->poc_msb_present[i] = get_bits1(gb);
303  if (rps->poc_msb_present[i]) {
304  int64_t delta = get_ue_golomb_long(gb);
305  int64_t poc;
306 
307  if (i && i != nb_sps)
308  delta += prev_delta_msb;
309 
310  poc = rps->poc[i] + s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb;
311  if (poc != (int32_t)poc)
312  return AVERROR_INVALIDDATA;
313  rps->poc[i] = poc;
314  prev_delta_msb = delta;
315  }
316  }
317 
318  return 0;
319 }
320 
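/* Propagate stream parameters from the SPS/VUI (dimensions, profile/level,
 * sample aspect ratio, color description, chroma location, frame rate) to
 * the AVCodecContext. */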
321 static void export_stream_params(HEVCContext *s, const HEVCSPS *sps)
322 {
323  AVCodecContext *avctx = s->avctx;
324  const HEVCParamSets *ps = &s->ps;
325  const HEVCVPS *vps = ps->vps_list[sps->vps_id];
326  const HEVCWindow *ow = &sps->output_window;
327  unsigned int num = 0, den = 0;
328 
329  avctx->pix_fmt = sps->pix_fmt;
330  avctx->coded_width = sps->width;
331  avctx->coded_height = sps->height;
332  avctx->width = sps->width - ow->left_offset - ow->right_offset;
333  avctx->height = sps->height - ow->top_offset - ow->bottom_offset;
334  avctx->has_b_frames = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics;
335  avctx->profile = sps->ptl.general_ptl.profile_idc;
336  avctx->level = sps->ptl.general_ptl.level_idc;
337 
338  ff_set_sar(avctx, sps->vui.common.sar);
339 
340  if (sps->vui.common.video_signal_type_present_flag)
341  avctx->color_range = sps->vui.common.video_full_range_flag ? AVCOL_RANGE_JPEG
342  : AVCOL_RANGE_MPEG;
343  else
344  avctx->color_range = AVCOL_RANGE_MPEG;
345 
346  if (sps->vui.common.colour_description_present_flag) {
347  avctx->color_primaries = sps->vui.common.colour_primaries;
348  avctx->color_trc = sps->vui.common.transfer_characteristics;
349  avctx->colorspace = sps->vui.common.matrix_coeffs;
350  } else {
351  avctx->color_primaries = AVCOL_PRI_UNSPECIFIED;
352  avctx->color_trc = AVCOL_TRC_UNSPECIFIED;
353  avctx->colorspace = AVCOL_SPC_UNSPECIFIED;
354  }
355 
356  avctx->chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED;
357  if (sps->chroma_format_idc == 1) {
358  if (sps->vui.common.chroma_loc_info_present_flag) {
359  if (sps->vui.common.chroma_sample_loc_type_top_field <= 5)
360  avctx->chroma_sample_location = sps->vui.common.chroma_sample_loc_type_top_field + 1;
361  } else
362  avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
363  }
364 
365  if (vps->vps_timing_info_present_flag) {
366  num = vps->vps_num_units_in_tick;
367  den = vps->vps_time_scale;
368  } else if (sps->vui.vui_timing_info_present_flag) {
369  num = sps->vui.vui_num_units_in_tick;
370  den = sps->vui.vui_time_scale;
371  }
372 
373  if (num != 0 && den != 0)
374  av_reduce(&avctx->framerate.den, &avctx->framerate.num,
375  num, den, 1 << 30);
376 }
377 
378 static int export_stream_params_from_sei(HEVCContext *s)
379 {
380  AVCodecContext *avctx = s->avctx;
381 
382  if (s->sei.common.a53_caption.buf_ref)
383  s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
384 
385  if (s->sei.common.alternative_transfer.present &&
386  av_color_transfer_name(s->sei.common.alternative_transfer.preferred_transfer_characteristics) &&
387  s->sei.common.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
388  avctx->color_trc = s->sei.common.alternative_transfer.preferred_transfer_characteristics;
389  }
390 
391  if (s->sei.common.film_grain_characteristics.present)
392  avctx->properties |= FF_CODEC_PROPERTY_FILM_GRAIN;
393 
394  return 0;
395 }
396 
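/* Build the candidate pixel format list for ff_get_format(): hardware
 * acceleration formats compiled in for this chroma format/bit depth come
 * first, followed by the native software format. */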
397 static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
398 {
399 #define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \
400  CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \
401  CONFIG_HEVC_NVDEC_HWACCEL + \
402  CONFIG_HEVC_VAAPI_HWACCEL + \
403  CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \
404  CONFIG_HEVC_VDPAU_HWACCEL + \
405  CONFIG_HEVC_VULKAN_HWACCEL)
406  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
407 
408  switch (sps->pix_fmt) {
409  case AV_PIX_FMT_YUV420P:
410  case AV_PIX_FMT_YUVJ420P:
411 #if CONFIG_HEVC_DXVA2_HWACCEL
412  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
413 #endif
414 #if CONFIG_HEVC_D3D11VA_HWACCEL
415  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
416  *fmt++ = AV_PIX_FMT_D3D11;
417 #endif
418 #if CONFIG_HEVC_VAAPI_HWACCEL
419  *fmt++ = AV_PIX_FMT_VAAPI;
420 #endif
421 #if CONFIG_HEVC_VDPAU_HWACCEL
422  *fmt++ = AV_PIX_FMT_VDPAU;
423 #endif
424 #if CONFIG_HEVC_NVDEC_HWACCEL
425  *fmt++ = AV_PIX_FMT_CUDA;
426 #endif
427 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
428  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
429 #endif
430 #if CONFIG_HEVC_VULKAN_HWACCEL
431  *fmt++ = AV_PIX_FMT_VULKAN;
432 #endif
433  break;
434  case AV_PIX_FMT_YUV420P10:
435 #if CONFIG_HEVC_DXVA2_HWACCEL
436  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
437 #endif
438 #if CONFIG_HEVC_D3D11VA_HWACCEL
439  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
440  *fmt++ = AV_PIX_FMT_D3D11;
441 #endif
442 #if CONFIG_HEVC_VAAPI_HWACCEL
443  *fmt++ = AV_PIX_FMT_VAAPI;
444 #endif
445 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
446  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
447 #endif
448 #if CONFIG_HEVC_VULKAN_HWACCEL
449  *fmt++ = AV_PIX_FMT_VULKAN;
450 #endif
451 #if CONFIG_HEVC_VDPAU_HWACCEL
452  *fmt++ = AV_PIX_FMT_VDPAU;
453 #endif
454 #if CONFIG_HEVC_NVDEC_HWACCEL
455  *fmt++ = AV_PIX_FMT_CUDA;
456 #endif
457  break;
458  case AV_PIX_FMT_YUV444P:
459 #if CONFIG_HEVC_VAAPI_HWACCEL
460  *fmt++ = AV_PIX_FMT_VAAPI;
461 #endif
462 #if CONFIG_HEVC_VDPAU_HWACCEL
463  *fmt++ = AV_PIX_FMT_VDPAU;
464 #endif
465 #if CONFIG_HEVC_NVDEC_HWACCEL
466  *fmt++ = AV_PIX_FMT_CUDA;
467 #endif
468 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
469  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
470 #endif
471 #if CONFIG_HEVC_VULKAN_HWACCEL
472  *fmt++ = AV_PIX_FMT_VULKAN;
473 #endif
474  break;
475  case AV_PIX_FMT_YUV422P:
476  case AV_PIX_FMT_YUV422P10LE:
477 #if CONFIG_HEVC_VAAPI_HWACCEL
478  *fmt++ = AV_PIX_FMT_VAAPI;
479 #endif
480 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
481  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
482 #endif
483 #if CONFIG_HEVC_VULKAN_HWACCEL
484  *fmt++ = AV_PIX_FMT_VULKAN;
485 #endif
486  break;
487  case AV_PIX_FMT_YUV444P10:
488 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
489  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
490 #endif
491  /* NOTE: fallthrough */
492  case AV_PIX_FMT_YUV420P12:
493  case AV_PIX_FMT_YUV444P12:
494 #if CONFIG_HEVC_VAAPI_HWACCEL
495  *fmt++ = AV_PIX_FMT_VAAPI;
496 #endif
497 #if CONFIG_HEVC_VDPAU_HWACCEL
498  *fmt++ = AV_PIX_FMT_VDPAU;
499 #endif
500 #if CONFIG_HEVC_VULKAN_HWACCEL
501  *fmt++ = AV_PIX_FMT_VULKAN;
502 #endif
503 #if CONFIG_HEVC_NVDEC_HWACCEL
504  *fmt++ = AV_PIX_FMT_CUDA;
505 #endif
506  break;
507  case AV_PIX_FMT_YUV422P12:
508 #if CONFIG_HEVC_VAAPI_HWACCEL
509  *fmt++ = AV_PIX_FMT_VAAPI;
510 #endif
511 #if CONFIG_HEVC_VULKAN_HWACCEL
512  *fmt++ = AV_PIX_FMT_VULKAN;
513 #endif
514  break;
515  }
516 
517  *fmt++ = sps->pix_fmt;
518  *fmt = AV_PIX_FMT_NONE;
519 
520  return ff_get_format(s->avctx, pix_fmts);
521 }
522 
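/* Switch the decoder to a new SPS: reallocate the per-frame arrays,
 * reinitialize the prediction and DSP contexts for the new bit depth and
 * allocate the SAO line buffers used by the software loop filter. */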
523 static int set_sps(HEVCContext *s, const HEVCSPS *sps,
524  enum AVPixelFormat pix_fmt)
525 {
526  int ret, i;
527 
528  pic_arrays_free(s);
529  s->ps.sps = NULL;
530  s->ps.vps = NULL;
531 
532  if (!sps)
533  return 0;
534 
535  ret = pic_arrays_init(s, sps);
536  if (ret < 0)
537  goto fail;
538 
539  export_stream_params(s, sps);
540 
541  s->avctx->pix_fmt = pix_fmt;
542 
543  ff_hevc_pred_init(&s->hpc, sps->bit_depth);
544  ff_hevc_dsp_init (&s->hevcdsp, sps->bit_depth);
545  ff_videodsp_init (&s->vdsp, sps->bit_depth);
546 
547  for (i = 0; i < 3; i++) {
548  av_freep(&s->sao_pixel_buffer_h[i]);
549  av_freep(&s->sao_pixel_buffer_v[i]);
550  }
551 
552  if (sps->sao_enabled && !s->avctx->hwaccel) {
553  int c_count = (sps->chroma_format_idc != 0) ? 3 : 1;
554  int c_idx;
555 
556  for(c_idx = 0; c_idx < c_count; c_idx++) {
557  int w = sps->width >> sps->hshift[c_idx];
558  int h = sps->height >> sps->vshift[c_idx];
559  s->sao_pixel_buffer_h[c_idx] =
560  av_malloc((w * 2 * sps->ctb_height) <<
561  sps->pixel_shift);
562  s->sao_pixel_buffer_v[c_idx] =
563  av_malloc((h * 2 * sps->ctb_width) <<
564  sps->pixel_shift);
565  if (!s->sao_pixel_buffer_h[c_idx] ||
566  !s->sao_pixel_buffer_v[c_idx])
567  goto fail;
568  }
569  }
570 
571  s->ps.sps = sps;
572  s->ps.vps = s->ps.vps_list[s->ps.sps->vps_id];
573 
574  return 0;
575 
576 fail:
577  pic_arrays_free(s);
578  for (i = 0; i < 3; i++) {
579  av_freep(&s->sao_pixel_buffer_h[i]);
580  av_freep(&s->sao_pixel_buffer_v[i]);
581  }
582  s->ps.sps = NULL;
583  return ret;
584 }
585 
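/* Parse a slice segment header (HEVC spec 7.3.6.1) into s->sh, resolving the
 * active PPS/SPS, the picture order count and the reference picture sets. */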
586 static int hls_slice_header(HEVCContext *s)
587 {
588  GetBitContext *gb = &s->HEVClc->gb;
589  SliceHeader *sh = &s->sh;
590  int i, ret;
591 
592  // Coded parameters
593  sh->first_slice_in_pic_flag = get_bits1(gb);
594  if (s->ref && sh->first_slice_in_pic_flag) {
595  av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\n");
596  return 1; // This slice will be skipped later, do not corrupt state
597  }
598 
599  if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) {
600  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
601  s->max_ra = INT_MAX;
602  if (IS_IDR(s))
603  ff_hevc_clear_refs(s);
604  }
605  sh->no_output_of_prior_pics_flag = 0;
606  if (IS_IRAP(s))
607  sh->no_output_of_prior_pics_flag = get_bits1(gb);
608 
609  sh->pps_id = get_ue_golomb_long(gb);
610  if (sh->pps_id >= HEVC_MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) {
611  av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id);
612  return AVERROR_INVALIDDATA;
613  }
614  if (!sh->first_slice_in_pic_flag &&
615  s->ps.pps != s->ps.pps_list[sh->pps_id]) {
616  av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n");
617  return AVERROR_INVALIDDATA;
618  }
619  s->ps.pps = s->ps.pps_list[sh->pps_id];
620  if (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos == 1)
621  sh->no_output_of_prior_pics_flag = 1;
622 
623  if (s->ps.sps != s->ps.sps_list[s->ps.pps->sps_id]) {
624  const HEVCSPS *sps = s->ps.sps_list[s->ps.pps->sps_id];
625  enum AVPixelFormat pix_fmt;
626 
627  ff_hevc_clear_refs(s);
628 
629  ret = set_sps(s, sps, sps->pix_fmt);
630  if (ret < 0)
631  return ret;
632 
633  pix_fmt = get_format(s, sps);
634  if (pix_fmt < 0)
635  return pix_fmt;
636  s->avctx->pix_fmt = pix_fmt;
637 
638  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
639  s->max_ra = INT_MAX;
640  }
641 
642  ret = export_stream_params_from_sei(s);
643  if (ret < 0)
644  return ret;
645 
646  sh->dependent_slice_segment_flag = 0;
647  if (!sh->first_slice_in_pic_flag) {
648  int slice_address_length;
649 
650  if (s->ps.pps->dependent_slice_segments_enabled_flag)
651  sh->dependent_slice_segment_flag = get_bits1(gb);
652 
653  slice_address_length = av_ceil_log2(s->ps.sps->ctb_width *
654  s->ps.sps->ctb_height);
655  sh->slice_segment_addr = get_bitsz(gb, slice_address_length);
656  if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
657  av_log(s->avctx, AV_LOG_ERROR,
658  "Invalid slice segment address: %u.\n",
659  sh->slice_segment_addr);
660  return AVERROR_INVALIDDATA;
661  }
662 
663  if (!sh->dependent_slice_segment_flag) {
664  sh->slice_addr = sh->slice_segment_addr;
665  s->slice_idx++;
666  }
667  } else {
668  sh->slice_segment_addr = sh->slice_addr = 0;
669  s->slice_idx = 0;
670  s->slice_initialized = 0;
671  }
672 
673  if (!sh->dependent_slice_segment_flag) {
674  s->slice_initialized = 0;
675 
676  for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++)
677  skip_bits(gb, 1); // slice_reserved_undetermined_flag[]
678 
679  sh->slice_type = get_ue_golomb_long(gb);
680  if (!(sh->slice_type == HEVC_SLICE_I ||
681  sh->slice_type == HEVC_SLICE_P ||
682  sh->slice_type == HEVC_SLICE_B)) {
683  av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n",
684  sh->slice_type);
685  return AVERROR_INVALIDDATA;
686  }
687  if (IS_IRAP(s) && sh->slice_type != HEVC_SLICE_I &&
688  !s->ps.pps->pps_curr_pic_ref_enabled_flag) {
689  av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n");
690  return AVERROR_INVALIDDATA;
691  }
692 
693  // when flag is not present, picture is inferred to be output
694  sh->pic_output_flag = 1;
695  if (s->ps.pps->output_flag_present_flag)
696  sh->pic_output_flag = get_bits1(gb);
697 
698  if (s->ps.sps->separate_colour_plane_flag)
699  sh->colour_plane_id = get_bits(gb, 2);
700 
701  if (!IS_IDR(s)) {
702  int poc, pos;
703 
704  sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb);
705  poc = ff_hevc_compute_poc(s->ps.sps, s->pocTid0, sh->pic_order_cnt_lsb, s->nal_unit_type);
706  if (!sh->first_slice_in_pic_flag && poc != s->poc) {
707  av_log(s->avctx, AV_LOG_WARNING,
708  "Ignoring POC change between slices: %d -> %d\n", s->poc, poc);
709  if (s->avctx->err_recognition & AV_EF_EXPLODE)
710  return AVERROR_INVALIDDATA;
711  poc = s->poc;
712  }
713  s->poc = poc;
714 
715  sh->short_term_ref_pic_set_sps_flag = get_bits1(gb);
716  pos = get_bits_left(gb);
717  if (!sh->short_term_ref_pic_set_sps_flag) {
718  ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1);
719  if (ret < 0)
720  return ret;
721 
722  sh->short_term_rps = &sh->slice_rps;
723  } else {
724  int numbits, rps_idx;
725 
726  if (!s->ps.sps->nb_st_rps) {
727  av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n");
728  return AVERROR_INVALIDDATA;
729  }
730 
731  numbits = av_ceil_log2(s->ps.sps->nb_st_rps);
732  rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
733  sh->short_term_rps = &s->ps.sps->st_rps[rps_idx];
734  }
735  sh->short_term_ref_pic_set_size = pos - get_bits_left(gb);
736 
737  pos = get_bits_left(gb);
738  ret = decode_lt_rps(s, &sh->long_term_rps, gb);
739  if (ret < 0) {
740  av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n");
741  if (s->avctx->err_recognition & AV_EF_EXPLODE)
742  return AVERROR_INVALIDDATA;
743  }
744  sh->long_term_ref_pic_set_size = pos - get_bits_left(gb);
745 
746  if (s->ps.sps->sps_temporal_mvp_enabled_flag)
747  sh->slice_temporal_mvp_enabled_flag = get_bits1(gb);
748  else
749  sh->slice_temporal_mvp_enabled_flag = 0;
750  } else {
751  s->poc = 0;
752  sh->pic_order_cnt_lsb = 0;
755  sh->short_term_rps = NULL;
758  }
759 
760  /* 8.3.1 */
761  if (sh->first_slice_in_pic_flag && s->temporal_id == 0 &&
762  s->nal_unit_type != HEVC_NAL_TRAIL_N &&
763  s->nal_unit_type != HEVC_NAL_TSA_N &&
764  s->nal_unit_type != HEVC_NAL_STSA_N &&
765  s->nal_unit_type != HEVC_NAL_RADL_N &&
766  s->nal_unit_type != HEVC_NAL_RADL_R &&
767  s->nal_unit_type != HEVC_NAL_RASL_N &&
768  s->nal_unit_type != HEVC_NAL_RASL_R)
769  s->pocTid0 = s->poc;
770 
771  if (s->ps.sps->sao_enabled) {
772  sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb);
773  if (s->ps.sps->chroma_format_idc) {
774  sh->slice_sample_adaptive_offset_flag[1] =
775  sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb);
776  }
777  } else {
778  sh->slice_sample_adaptive_offset_flag[0] = 0;
779  sh->slice_sample_adaptive_offset_flag[1] = 0;
780  sh->slice_sample_adaptive_offset_flag[2] = 0;
781  }
782 
783  sh->nb_refs[L0] = sh->nb_refs[L1] = 0;
784  if (sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B) {
785  int nb_refs;
786 
787  sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active;
788  if (sh->slice_type == HEVC_SLICE_B)
789  sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active;
790 
791  if (get_bits1(gb)) { // num_ref_idx_active_override_flag
792  sh->nb_refs[L0] = get_ue_golomb_31(gb) + 1;
793  if (sh->slice_type == HEVC_SLICE_B)
794  sh->nb_refs[L1] = get_ue_golomb_31(gb) + 1;
795  }
796  if (sh->nb_refs[L0] >= HEVC_MAX_REFS || sh->nb_refs[L1] >= HEVC_MAX_REFS) {
797  av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n",
798  sh->nb_refs[L0], sh->nb_refs[L1]);
799  return AVERROR_INVALIDDATA;
800  }
801 
802  sh->rpl_modification_flag[0] = 0;
803  sh->rpl_modification_flag[1] = 0;
804  nb_refs = ff_hevc_frame_nb_refs(s);
805  if (!nb_refs) {
806  av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n");
807  return AVERROR_INVALIDDATA;
808  }
809 
810  if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) {
811  sh->rpl_modification_flag[0] = get_bits1(gb);
812  if (sh->rpl_modification_flag[0]) {
813  for (i = 0; i < sh->nb_refs[L0]; i++)
814  sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs));
815  }
816 
817  if (sh->slice_type == HEVC_SLICE_B) {
818  sh->rpl_modification_flag[1] = get_bits1(gb);
819  if (sh->rpl_modification_flag[1] == 1)
820  for (i = 0; i < sh->nb_refs[L1]; i++)
821  sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs));
822  }
823  }
824 
825  if (sh->slice_type == HEVC_SLICE_B)
826  sh->mvd_l1_zero_flag = get_bits1(gb);
827 
828  if (s->ps.pps->cabac_init_present_flag)
829  sh->cabac_init_flag = get_bits1(gb);
830  else
831  sh->cabac_init_flag = 0;
832 
833  sh->collocated_ref_idx = 0;
834  if (sh->slice_temporal_mvp_enabled_flag) {
835  sh->collocated_list = L0;
836  if (sh->slice_type == HEVC_SLICE_B)
837  sh->collocated_list = !get_bits1(gb);
838 
839  if (sh->nb_refs[sh->collocated_list] > 1) {
840  sh->collocated_ref_idx = get_ue_golomb_long(gb);
841  if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) {
842  av_log(s->avctx, AV_LOG_ERROR,
843  "Invalid collocated_ref_idx: %d.\n",
844  sh->collocated_ref_idx);
845  return AVERROR_INVALIDDATA;
846  }
847  }
848  }
849 
850  if ((s->ps.pps->weighted_pred_flag && sh->slice_type == HEVC_SLICE_P) ||
851  (s->ps.pps->weighted_bipred_flag && sh->slice_type == HEVC_SLICE_B)) {
852  int ret = pred_weight_table(s, gb);
853  if (ret < 0)
854  return ret;
855  }
856 
857  sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb);
858  if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) {
859  av_log(s->avctx, AV_LOG_ERROR,
860  "Invalid number of merging MVP candidates: %d.\n",
861  sh->max_num_merge_cand);
862  return AVERROR_INVALIDDATA;
863  }
864 
865  // Syntax in 7.3.6.1
866  if (s->ps.sps->motion_vector_resolution_control_idc == 2)
867  sh->use_integer_mv_flag = get_bits1(gb);
868  else
869  // Inferred to be equal to motion_vector_resolution_control_idc if not present
870  sh->use_integer_mv_flag = s->ps.sps->motion_vector_resolution_control_idc;
871 
872  }
873 
874  sh->slice_qp_delta = get_se_golomb(gb);
875 
876  if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) {
877  sh->slice_cb_qp_offset = get_se_golomb(gb);
878  sh->slice_cr_qp_offset = get_se_golomb(gb);
879  if (sh->slice_cb_qp_offset < -12 || sh->slice_cb_qp_offset > 12 ||
880  sh->slice_cr_qp_offset < -12 || sh->slice_cr_qp_offset > 12) {
881  av_log(s->avctx, AV_LOG_ERROR, "Invalid slice cx qp offset.\n");
882  return AVERROR_INVALIDDATA;
883  }
884  } else {
885  sh->slice_cb_qp_offset = 0;
886  sh->slice_cr_qp_offset = 0;
887  }
888 
889  if (s->ps.pps->pps_slice_act_qp_offsets_present_flag) {
890  get_se_golomb(gb); // slice_act_y_qp_offset
891  get_se_golomb(gb); // slice_act_cb_qp_offset
892  get_se_golomb(gb); // slice_act_cr_qp_offset
893  }
894 
895  if (s->ps.pps->chroma_qp_offset_list_enabled_flag)
896  sh->cu_chroma_qp_offset_enabled_flag = get_bits1(gb);
897  else
898  sh->cu_chroma_qp_offset_enabled_flag = 0;
899 
900  if (s->ps.pps->deblocking_filter_control_present_flag) {
901  int deblocking_filter_override_flag = 0;
902 
903  if (s->ps.pps->deblocking_filter_override_enabled_flag)
904  deblocking_filter_override_flag = get_bits1(gb);
905 
906  if (deblocking_filter_override_flag) {
907  sh->disable_deblocking_filter_flag = get_bits1(gb);
908  if (!sh->disable_deblocking_filter_flag) {
909  int beta_offset_div2 = get_se_golomb(gb);
910  int tc_offset_div2 = get_se_golomb(gb) ;
911  if (beta_offset_div2 < -6 || beta_offset_div2 > 6 ||
912  tc_offset_div2 < -6 || tc_offset_div2 > 6) {
913  av_log(s->avctx, AV_LOG_ERROR,
914  "Invalid deblock filter offsets: %d, %d\n",
915  beta_offset_div2, tc_offset_div2);
916  return AVERROR_INVALIDDATA;
917  }
918  sh->beta_offset = beta_offset_div2 * 2;
919  sh->tc_offset = tc_offset_div2 * 2;
920  }
921  } else {
922  sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf;
923  sh->beta_offset = s->ps.pps->beta_offset;
924  sh->tc_offset = s->ps.pps->tc_offset;
925  }
926  } else {
927  sh->disable_deblocking_filter_flag = 0;
928  sh->beta_offset = 0;
929  sh->tc_offset = 0;
930  }
931 
932  if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag &&
933  (sh->slice_sample_adaptive_offset_flag[0] ||
934  sh->slice_sample_adaptive_offset_flag[1] ||
935  !sh->disable_deblocking_filter_flag)) {
936  sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb);
937  } else {
938  sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag;
939  }
940  } else if (!s->slice_initialized) {
941  av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n");
942  return AVERROR_INVALIDDATA;
943  }
944 
945  sh->num_entry_point_offsets = 0;
946  if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) {
947  unsigned num_entry_point_offsets = get_ue_golomb_long(gb);
948  // It would be possible to bound this tighter but this here is simpler
949  if (num_entry_point_offsets > get_bits_left(gb)) {
950  av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets);
951  return AVERROR_INVALIDDATA;
952  }
953 
954  sh->num_entry_point_offsets = num_entry_point_offsets;
955  if (sh->num_entry_point_offsets > 0) {
956  int offset_len = get_ue_golomb_long(gb) + 1;
957 
958  if (offset_len < 1 || offset_len > 32) {
959  sh->num_entry_point_offsets = 0;
960  av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len);
961  return AVERROR_INVALIDDATA;
962  }
963 
964  av_freep(&sh->entry_point_offset);
965  av_freep(&sh->offset);
966  av_freep(&sh->size);
967  sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned));
968  sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
969  sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
970  if (!sh->entry_point_offset || !sh->offset || !sh->size) {
971  sh->num_entry_point_offsets = 0;
972  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n");
973  return AVERROR(ENOMEM);
974  }
975  for (i = 0; i < sh->num_entry_point_offsets; i++) {
976  unsigned val = get_bits_long(gb, offset_len);
977  sh->entry_point_offset[i] = val + 1; // +1 to get the size
978  }
979  if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) {
980  s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here
981  s->threads_number = 1;
982  } else
983  s->enable_parallel_tiles = 0;
984  } else
985  s->enable_parallel_tiles = 0;
986  }
987 
988  if (s->ps.pps->slice_header_extension_present_flag) {
989  unsigned int length = get_ue_golomb_long(gb);
990  if (length*8LL > get_bits_left(gb)) {
991  av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n");
992  return AVERROR_INVALIDDATA;
993  }
994  for (i = 0; i < length; i++)
995  skip_bits(gb, 8); // slice_header_extension_data_byte
996  }
997 
998  // Inferred parameters
999  sh->slice_qp = 26U + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta;
1000  if (sh->slice_qp > 51 ||
1001  sh->slice_qp < -s->ps.sps->qp_bd_offset) {
1002  av_log(s->avctx, AV_LOG_ERROR,
1003  "The slice_qp %d is outside the valid range "
1004  "[%d, 51].\n",
1005  sh->slice_qp,
1006  -s->ps.sps->qp_bd_offset);
1007  return AVERROR_INVALIDDATA;
1008  }
1009 
1010  s->sh.slice_ctb_addr_rs = sh->slice_segment_addr;
1011 
1012  if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) {
1013  av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n");
1014  return AVERROR_INVALIDDATA;
1015  }
1016 
1017  if (get_bits_left(gb) < 0) {
1018  av_log(s->avctx, AV_LOG_ERROR,
1019  "Overread slice header by %d bits\n", -get_bits_left(gb));
1020  return AVERROR_INVALIDDATA;
1021  }
1022 
1023  s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag;
1024 
1025  if (!s->ps.pps->cu_qp_delta_enabled_flag)
1026  s->HEVClc->qp_y = s->sh.slice_qp;
1027 
1028  s->slice_initialized = 1;
1029  s->HEVClc->tu.cu_qp_offset_cb = 0;
1030  s->HEVClc->tu.cu_qp_offset_cr = 0;
1031 
1032  return 0;
1033 }
1034 
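/* CTB(tab, x, y) indexes a per-CTB table at CTB raster position (x, y). */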
1035 #define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])
1036 
1037 #define SET_SAO(elem, value) \
1038 do { \
1039  if (!sao_merge_up_flag && !sao_merge_left_flag) \
1040  sao->elem = value; \
1041  else if (sao_merge_left_flag) \
1042  sao->elem = CTB(s->sao, rx-1, ry).elem; \
1043  else if (sao_merge_up_flag) \
1044  sao->elem = CTB(s->sao, rx, ry-1).elem; \
1045  else \
1046  sao->elem = 0; \
1047 } while (0)
1048 
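/* SAO parameters for a CTB are either coded directly or merged (copied) from
 * the left or the above CTB; SET_SAO above implements that merge rule. */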
1049 static void hls_sao_param(HEVCLocalContext *lc, int rx, int ry)
1050 {
1051  const HEVCContext *const s = lc->parent;
1052  int sao_merge_left_flag = 0;
1053  int sao_merge_up_flag = 0;
1054  SAOParams *sao = &CTB(s->sao, rx, ry);
1055  int c_idx, i;
1056 
1057  if (s->sh.slice_sample_adaptive_offset_flag[0] ||
1058  s->sh.slice_sample_adaptive_offset_flag[1]) {
1059  if (rx > 0) {
1060  if (lc->ctb_left_flag)
1061  sao_merge_left_flag = ff_hevc_sao_merge_flag_decode(lc);
1062  }
1063  if (ry > 0 && !sao_merge_left_flag) {
1064  if (lc->ctb_up_flag)
1065  sao_merge_up_flag = ff_hevc_sao_merge_flag_decode(lc);
1066  }
1067  }
1068 
1069  for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
1070  int log2_sao_offset_scale = c_idx == 0 ? s->ps.pps->log2_sao_offset_scale_luma :
1071  s->ps.pps->log2_sao_offset_scale_chroma;
1072 
1073  if (!s->sh.slice_sample_adaptive_offset_flag[c_idx]) {
1074  sao->type_idx[c_idx] = SAO_NOT_APPLIED;
1075  continue;
1076  }
1077 
1078  if (c_idx == 2) {
1079  sao->type_idx[2] = sao->type_idx[1];
1080  sao->eo_class[2] = sao->eo_class[1];
1081  } else {
1082  SET_SAO(type_idx[c_idx], ff_hevc_sao_type_idx_decode(lc));
1083  }
1084 
1085  if (sao->type_idx[c_idx] == SAO_NOT_APPLIED)
1086  continue;
1087 
1088  for (i = 0; i < 4; i++)
1089  SET_SAO(offset_abs[c_idx][i], ff_hevc_sao_offset_abs_decode(lc));
1090 
1091  if (sao->type_idx[c_idx] == SAO_BAND) {
1092  for (i = 0; i < 4; i++) {
1093  if (sao->offset_abs[c_idx][i]) {
1094  SET_SAO(offset_sign[c_idx][i],
1095  ff_hevc_sao_offset_sign_decode(lc));
1096  } else {
1097  sao->offset_sign[c_idx][i] = 0;
1098  }
1099  }
1100  SET_SAO(band_position[c_idx], ff_hevc_sao_band_position_decode(lc));
1101  } else if (c_idx != 2) {
1102  SET_SAO(eo_class[c_idx], ff_hevc_sao_eo_class_decode(lc));
1103  }
1104 
1105  // Inferred parameters
1106  sao->offset_val[c_idx][0] = 0;
1107  for (i = 0; i < 4; i++) {
1108  sao->offset_val[c_idx][i + 1] = sao->offset_abs[c_idx][i];
1109  if (sao->type_idx[c_idx] == SAO_EDGE) {
1110  if (i > 1)
1111  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1112  } else if (sao->offset_sign[c_idx][i]) {
1113  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1114  }
1115  sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
1116  }
1117  }
1118 }
1119 
1120 #undef SET_SAO
1121 #undef CTB
1122 
1123 static int hls_cross_component_pred(HEVCLocalContext *lc, int idx)
1124 {
1125  int log2_res_scale_abs_plus1 = ff_hevc_log2_res_scale_abs(lc, idx);
1126 
1127  if (log2_res_scale_abs_plus1 != 0) {
1128  int res_scale_sign_flag = ff_hevc_res_scale_sign_flag(lc, idx);
1129  lc->tu.res_scale_val = (1 << (log2_res_scale_abs_plus1 - 1)) *
1130  (1 - 2 * res_scale_sign_flag);
1131  } else {
1132  lc->tu.res_scale_val = 0;
1133  }
1134 
1135 
1136  return 0;
1137 }
1138 
1139 static int hls_transform_unit(HEVCLocalContext *lc, int x0, int y0,
1140  int xBase, int yBase, int cb_xBase, int cb_yBase,
1141  int log2_cb_size, int log2_trafo_size,
1142  int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
1143 {
1144  const HEVCContext *const s = lc->parent;
1145  const int log2_trafo_size_c = log2_trafo_size - s->ps.sps->hshift[1];
1146  int i;
1147 
1148  if (lc->cu.pred_mode == MODE_INTRA) {
1149  int trafo_size = 1 << log2_trafo_size;
1150  ff_hevc_set_neighbour_available(lc, x0, y0, trafo_size, trafo_size);
1151 
1152  s->hpc.intra_pred[log2_trafo_size - 2](lc, x0, y0, 0);
1153  }
1154 
1155  if (cbf_luma || cbf_cb[0] || cbf_cr[0] ||
1156  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1157  int scan_idx = SCAN_DIAG;
1158  int scan_idx_c = SCAN_DIAG;
1159  int cbf_chroma = cbf_cb[0] || cbf_cr[0] ||
1160  (s->ps.sps->chroma_format_idc == 2 &&
1161  (cbf_cb[1] || cbf_cr[1]));
1162 
1163  if (s->ps.pps->cu_qp_delta_enabled_flag && !lc->tu.is_cu_qp_delta_coded) {
1164  lc->tu.cu_qp_delta = ff_hevc_cu_qp_delta_abs(lc);
1165  if (lc->tu.cu_qp_delta != 0)
1166  if (ff_hevc_cu_qp_delta_sign_flag(lc) == 1)
1167  lc->tu.cu_qp_delta = -lc->tu.cu_qp_delta;
1168  lc->tu.is_cu_qp_delta_coded = 1;
1169 
1170  if (lc->tu.cu_qp_delta < -(26 + s->ps.sps->qp_bd_offset / 2) ||
1171  lc->tu.cu_qp_delta > (25 + s->ps.sps->qp_bd_offset / 2)) {
1172  av_log(s->avctx, AV_LOG_ERROR,
1173  "The cu_qp_delta %d is outside the valid range "
1174  "[%d, %d].\n",
1175  lc->tu.cu_qp_delta,
1176  -(26 + s->ps.sps->qp_bd_offset / 2),
1177  (25 + s->ps.sps->qp_bd_offset / 2));
1178  return AVERROR_INVALIDDATA;
1179  }
1180 
1181  ff_hevc_set_qPy(lc, cb_xBase, cb_yBase, log2_cb_size);
1182  }
1183 
1184  if (s->sh.cu_chroma_qp_offset_enabled_flag && cbf_chroma &&
1185  !lc->cu.cu_transquant_bypass_flag && !lc->tu.is_cu_chroma_qp_offset_coded) {
1186  int cu_chroma_qp_offset_flag = ff_hevc_cu_chroma_qp_offset_flag(lc);
1187  if (cu_chroma_qp_offset_flag) {
1188  int cu_chroma_qp_offset_idx = 0;
1189  if (s->ps.pps->chroma_qp_offset_list_len_minus1 > 0) {
1190  cu_chroma_qp_offset_idx = ff_hevc_cu_chroma_qp_offset_idx(lc);
1191  av_log(s->avctx, AV_LOG_ERROR,
1192  "cu_chroma_qp_offset_idx not yet tested.\n");
1193  }
1194  lc->tu.cu_qp_offset_cb = s->ps.pps->cb_qp_offset_list[cu_chroma_qp_offset_idx];
1195  lc->tu.cu_qp_offset_cr = s->ps.pps->cr_qp_offset_list[cu_chroma_qp_offset_idx];
1196  } else {
1197  lc->tu.cu_qp_offset_cb = 0;
1198  lc->tu.cu_qp_offset_cr = 0;
1199  }
1200  lc->tu.is_cu_chroma_qp_offset_coded = 1;
1201  }
1202 
1203  if (lc->cu.pred_mode == MODE_INTRA && log2_trafo_size < 4) {
1204  if (lc->tu.intra_pred_mode >= 6 &&
1205  lc->tu.intra_pred_mode <= 14) {
1206  scan_idx = SCAN_VERT;
1207  } else if (lc->tu.intra_pred_mode >= 22 &&
1208  lc->tu.intra_pred_mode <= 30) {
1209  scan_idx = SCAN_HORIZ;
1210  }
1211 
1212  if (lc->tu.intra_pred_mode_c >= 6 &&
1213  lc->tu.intra_pred_mode_c <= 14) {
1214  scan_idx_c = SCAN_VERT;
1215  } else if (lc->tu.intra_pred_mode_c >= 22 &&
1216  lc->tu.intra_pred_mode_c <= 30) {
1217  scan_idx_c = SCAN_HORIZ;
1218  }
1219  }
1220 
1221  lc->tu.cross_pf = 0;
1222 
1223  if (cbf_luma)
1224  ff_hevc_hls_residual_coding(lc, x0, y0, log2_trafo_size, scan_idx, 0);
1225  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1226  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1227  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1228  lc->tu.cross_pf = (s->ps.pps->cross_component_prediction_enabled_flag && cbf_luma &&
1229  (lc->cu.pred_mode == MODE_INTER ||
1230  (lc->tu.chroma_mode_c == 4)));
1231 
1232  if (lc->tu.cross_pf) {
1233  hls_cross_component_pred(lc, 0);
1234  }
1235  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1236  if (lc->cu.pred_mode == MODE_INTRA) {
1237  ff_hevc_set_neighbour_available(lc, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1238  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (i << log2_trafo_size_c), 1);
1239  }
1240  if (cbf_cb[i])
1241  ff_hevc_hls_residual_coding(lc, x0, y0 + (i << log2_trafo_size_c),
1242  log2_trafo_size_c, scan_idx_c, 1);
1243  else
1244  if (lc->tu.cross_pf) {
1245  ptrdiff_t stride = s->frame->linesize[1];
1246  int hshift = s->ps.sps->hshift[1];
1247  int vshift = s->ps.sps->vshift[1];
1248  const int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1249  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1250  int size = 1 << log2_trafo_size_c;
1251 
1252  uint8_t *dst = &s->frame->data[1][(y0 >> vshift) * stride +
1253  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1254  for (i = 0; i < (size * size); i++) {
1255  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1256  }
1257  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1258  }
1259  }
1260 
1261  if (lc->tu.cross_pf) {
1262  hls_cross_component_pred(lc, 1);
1263  }
1264  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1265  if (lc->cu.pred_mode == MODE_INTRA) {
1266  ff_hevc_set_neighbour_available(lc, x0, y0 + (i << log2_trafo_size_c),
1267  trafo_size_h, trafo_size_v);
1268  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (i << log2_trafo_size_c), 2);
1269  }
1270  if (cbf_cr[i])
1271  ff_hevc_hls_residual_coding(lc, x0, y0 + (i << log2_trafo_size_c),
1272  log2_trafo_size_c, scan_idx_c, 2);
1273  else
1274  if (lc->tu.cross_pf) {
1275  ptrdiff_t stride = s->frame->linesize[2];
1276  int hshift = s->ps.sps->hshift[2];
1277  int vshift = s->ps.sps->vshift[2];
1278  const int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1279  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1280  int size = 1 << log2_trafo_size_c;
1281 
1282  uint8_t *dst = &s->frame->data[2][(y0 >> vshift) * stride +
1283  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1284  for (i = 0; i < (size * size); i++) {
1285  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1286  }
1287  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1288  }
1289  }
1290  } else if (s->ps.sps->chroma_format_idc && blk_idx == 3) {
1291  int trafo_size_h = 1 << (log2_trafo_size + 1);
1292  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1293  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1294  if (lc->cu.pred_mode == MODE_INTRA) {
1295  ff_hevc_set_neighbour_available(lc, xBase, yBase + (i << log2_trafo_size),
1296  trafo_size_h, trafo_size_v);
1297  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (i << log2_trafo_size), 1);
1298  }
1299  if (cbf_cb[i])
1300  ff_hevc_hls_residual_coding(lc, xBase, yBase + (i << log2_trafo_size),
1301  log2_trafo_size, scan_idx_c, 1);
1302  }
1303  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1304  if (lc->cu.pred_mode == MODE_INTRA) {
1305  ff_hevc_set_neighbour_available(lc, xBase, yBase + (i << log2_trafo_size),
1306  trafo_size_h, trafo_size_v);
1307  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (i << log2_trafo_size), 2);
1308  }
1309  if (cbf_cr[i])
1310  ff_hevc_hls_residual_coding(lc, xBase, yBase + (i << log2_trafo_size),
1311  log2_trafo_size, scan_idx_c, 2);
1312  }
1313  }
1314  } else if (s->ps.sps->chroma_format_idc && lc->cu.pred_mode == MODE_INTRA) {
1315  if (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3) {
1316  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1317  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1318  ff_hevc_set_neighbour_available(lc, x0, y0, trafo_size_h, trafo_size_v);
1319  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0, 1);
1320  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0, 2);
1321  if (s->ps.sps->chroma_format_idc == 2) {
1322  ff_hevc_set_neighbour_available(lc, x0, y0 + (1 << log2_trafo_size_c),
1323  trafo_size_h, trafo_size_v);
1324  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (1 << log2_trafo_size_c), 1);
1325  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (1 << log2_trafo_size_c), 2);
1326  }
1327  } else if (blk_idx == 3) {
1328  int trafo_size_h = 1 << (log2_trafo_size + 1);
1329  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1330  ff_hevc_set_neighbour_available(lc, xBase, yBase,
1331  trafo_size_h, trafo_size_v);
1332  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase, 1);
1333  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase, 2);
1334  if (s->ps.sps->chroma_format_idc == 2) {
1335  ff_hevc_set_neighbour_available(lc, xBase, yBase + (1 << log2_trafo_size),
1336  trafo_size_h, trafo_size_v);
1337  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (1 << log2_trafo_size), 1);
1338  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (1 << log2_trafo_size), 2);
1339  }
1340  }
1341  }
1342 
1343  return 0;
1344 }
1345 
1346 static void set_deblocking_bypass(const HEVCContext *s, int x0, int y0, int log2_cb_size)
1347 {
1348  int cb_size = 1 << log2_cb_size;
1349  int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
1350 
1351  int min_pu_width = s->ps.sps->min_pu_width;
1352  int x_end = FFMIN(x0 + cb_size, s->ps.sps->width);
1353  int y_end = FFMIN(y0 + cb_size, s->ps.sps->height);
1354  int i, j;
1355 
1356  for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++)
1357  for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++)
1358  s->is_pcm[i + j * min_pu_width] = 2;
1359 }
1360 
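/* Recursively parse the transform tree: each node either splits into four
 * quadrants (SUBDIVIDE) or becomes a transform unit whose luma/chroma CBFs
 * determine which residual blocks follow in the bitstream. */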
1361 static int hls_transform_tree(HEVCLocalContext *lc, int x0, int y0,
1362  int xBase, int yBase, int cb_xBase, int cb_yBase,
1363  int log2_cb_size, int log2_trafo_size,
1364  int trafo_depth, int blk_idx,
1365  const int *base_cbf_cb, const int *base_cbf_cr)
1366 {
1367  const HEVCContext *const s = lc->parent;
1368  uint8_t split_transform_flag;
1369  int cbf_cb[2];
1370  int cbf_cr[2];
1371  int ret;
1372 
1373  cbf_cb[0] = base_cbf_cb[0];
1374  cbf_cb[1] = base_cbf_cb[1];
1375  cbf_cr[0] = base_cbf_cr[0];
1376  cbf_cr[1] = base_cbf_cr[1];
1377 
1378  if (lc->cu.intra_split_flag) {
1379  if (trafo_depth == 1) {
1380  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[blk_idx];
1381  if (s->ps.sps->chroma_format_idc == 3) {
1382  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[blk_idx];
1383  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[blk_idx];
1384  } else {
1385  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1386  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1387  }
1388  }
1389  } else {
1390  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[0];
1391  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1392  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1393  }
1394 
1395  if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
1396  log2_trafo_size > s->ps.sps->log2_min_tb_size &&
1397  trafo_depth < lc->cu.max_trafo_depth &&
1398  !(lc->cu.intra_split_flag && trafo_depth == 0)) {
1399  split_transform_flag = ff_hevc_split_transform_flag_decode(lc, log2_trafo_size);
1400  } else {
1401  int inter_split = s->ps.sps->max_transform_hierarchy_depth_inter == 0 &&
1402  lc->cu.pred_mode == MODE_INTER &&
1403  lc->cu.part_mode != PART_2Nx2N &&
1404  trafo_depth == 0;
1405 
1406  split_transform_flag = log2_trafo_size > s->ps.sps->log2_max_trafo_size ||
1407  (lc->cu.intra_split_flag && trafo_depth == 0) ||
1408  inter_split;
1409  }
1410 
1411  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1412  if (trafo_depth == 0 || cbf_cb[0]) {
1413  cbf_cb[0] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1414  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1415  cbf_cb[1] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1416  }
1417  }
1418 
1419  if (trafo_depth == 0 || cbf_cr[0]) {
1420  cbf_cr[0] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1421  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1422  cbf_cr[1] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1423  }
1424  }
1425  }
1426 
1427  if (split_transform_flag) {
1428  const int trafo_size_split = 1 << (log2_trafo_size - 1);
1429  const int x1 = x0 + trafo_size_split;
1430  const int y1 = y0 + trafo_size_split;
1431 
1432 #define SUBDIVIDE(x, y, idx) \
1433 do { \
1434  ret = hls_transform_tree(lc, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size,\
1435  log2_trafo_size - 1, trafo_depth + 1, idx, \
1436  cbf_cb, cbf_cr); \
1437  if (ret < 0) \
1438  return ret; \
1439 } while (0)
1440 
1441  SUBDIVIDE(x0, y0, 0);
1442  SUBDIVIDE(x1, y0, 1);
1443  SUBDIVIDE(x0, y1, 2);
1444  SUBDIVIDE(x1, y1, 3);
1445 
1446 #undef SUBDIVIDE
1447  } else {
1448  int min_tu_size = 1 << s->ps.sps->log2_min_tb_size;
1449  int log2_min_tu_size = s->ps.sps->log2_min_tb_size;
1450  int min_tu_width = s->ps.sps->min_tb_width;
1451  int cbf_luma = 1;
1452 
1453  if (lc->cu.pred_mode == MODE_INTRA || trafo_depth != 0 ||
1454  cbf_cb[0] || cbf_cr[0] ||
1455  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1456  cbf_luma = ff_hevc_cbf_luma_decode(lc, trafo_depth);
1457  }
1458 
1459  ret = hls_transform_unit(lc, x0, y0, xBase, yBase, cb_xBase, cb_yBase,
1460  log2_cb_size, log2_trafo_size,
1461  blk_idx, cbf_luma, cbf_cb, cbf_cr);
1462  if (ret < 0)
1463  return ret;
1464  // TODO: store cbf_luma somewhere else
1465  if (cbf_luma) {
1466  int i, j;
1467  for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
1468  for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
1469  int x_tu = (x0 + j) >> log2_min_tu_size;
1470  int y_tu = (y0 + i) >> log2_min_tu_size;
1471  s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;
1472  }
1473  }
1474  if (!s->sh.disable_deblocking_filter_flag) {
1475  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_trafo_size);
1476  if (s->ps.pps->transquant_bypass_enable_flag &&
1477  lc->cu.cu_transquant_bypass_flag)
1478  set_deblocking_bypass(s, x0, y0, log2_trafo_size);
1479  }
1480  }
1481  return 0;
1482 }
1483 
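/* PCM coding units carry raw, uncompressed samples; they are read here with a
 * local GetBitContext and written directly into the output frame. */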
1484 static int hls_pcm_sample(HEVCLocalContext *lc, int x0, int y0, int log2_cb_size)
1485 {
1486  const HEVCContext *const s = lc->parent;
1487  GetBitContext gb;
1488  int cb_size = 1 << log2_cb_size;
1489  ptrdiff_t stride0 = s->frame->linesize[0];
1490  ptrdiff_t stride1 = s->frame->linesize[1];
1491  ptrdiff_t stride2 = s->frame->linesize[2];
1492  uint8_t *dst0 = &s->frame->data[0][y0 * stride0 + (x0 << s->ps.sps->pixel_shift)];
1493  uint8_t *dst1 = &s->frame->data[1][(y0 >> s->ps.sps->vshift[1]) * stride1 + ((x0 >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
1494  uint8_t *dst2 = &s->frame->data[2][(y0 >> s->ps.sps->vshift[2]) * stride2 + ((x0 >> s->ps.sps->hshift[2]) << s->ps.sps->pixel_shift)];
1495 
1496  int length = cb_size * cb_size * s->ps.sps->pcm.bit_depth +
1497  (((cb_size >> s->ps.sps->hshift[1]) * (cb_size >> s->ps.sps->vshift[1])) +
1498  ((cb_size >> s->ps.sps->hshift[2]) * (cb_size >> s->ps.sps->vshift[2]))) *
1499  s->ps.sps->pcm.bit_depth_chroma;
1500  const uint8_t *pcm = skip_bytes(&lc->cc, (length + 7) >> 3);
1501  int ret;
1502 
1503  if (!s->sh.disable_deblocking_filter_flag)
1504  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_cb_size);
1505 
1506  ret = init_get_bits(&gb, pcm, length);
1507  if (ret < 0)
1508  return ret;
1509 
1510  s->hevcdsp.put_pcm(dst0, stride0, cb_size, cb_size, &gb, s->ps.sps->pcm.bit_depth);
1511  if (s->ps.sps->chroma_format_idc) {
1512  s->hevcdsp.put_pcm(dst1, stride1,
1513  cb_size >> s->ps.sps->hshift[1],
1514  cb_size >> s->ps.sps->vshift[1],
1515  &gb, s->ps.sps->pcm.bit_depth_chroma);
1516  s->hevcdsp.put_pcm(dst2, stride2,
1517  cb_size >> s->ps.sps->hshift[2],
1518  cb_size >> s->ps.sps->vshift[2],
1519  &gb, s->ps.sps->pcm.bit_depth_chroma);
1520  }
1521 
1522  return 0;
1523 }
1524 
1525 /**
1526  * 8.5.3.2.2.1 Luma sample unidirectional interpolation process
1527  *
1528  * @param s HEVC decoding context
1529  * @param dst target buffer for block data at block position
1530  * @param dststride stride of the dst buffer
1531  * @param ref reference picture buffer at origin (0, 0)
1532  * @param mv motion vector (relative to block position) to get pixel data from
1533  * @param x_off horizontal position of block from origin (0, 0)
1534  * @param y_off vertical position of block from origin (0, 0)
1535  * @param block_w width of block
1536  * @param block_h height of block
1537  * @param luma_weight weighting factor applied to the luma prediction
1538  * @param luma_offset additive offset applied to the luma prediction value
1539  */
1540 
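/* Note: when the motion vector points outside the decoded reference area (or
 * the reference is the frame currently being reconstructed), the source block
 * is first copied through vdsp.emulated_edge_mc into edge_emu_buffer. */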
1541 static void luma_mc_uni(HEVCLocalContext *lc, uint8_t *dst, ptrdiff_t dststride,
1542  const AVFrame *ref, const Mv *mv, int x_off, int y_off,
1543  int block_w, int block_h, int luma_weight, int luma_offset)
1544 {
1545  const HEVCContext *const s = lc->parent;
1546  const uint8_t *src = ref->data[0];
1547  ptrdiff_t srcstride = ref->linesize[0];
1548  int pic_width = s->ps.sps->width;
1549  int pic_height = s->ps.sps->height;
1550  int mx = mv->x & 3;
1551  int my = mv->y & 3;
1552  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1553  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1554  int idx = hevc_pel_weight[block_w];
1555 
1556  x_off += mv->x >> 2;
1557  y_off += mv->y >> 2;
1558  src += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1559 
1560  if (x_off < QPEL_EXTRA_BEFORE || y_off < QPEL_EXTRA_AFTER ||
1561  x_off >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1562  y_off >= pic_height - block_h - QPEL_EXTRA_AFTER ||
1563  ref == s->frame) {
1564  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1565  int offset = QPEL_EXTRA_BEFORE * srcstride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1566  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1567 
1568  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset,
1569  edge_emu_stride, srcstride,
1570  block_w + QPEL_EXTRA,
1571  block_h + QPEL_EXTRA,
1572  x_off - QPEL_EXTRA_BEFORE, y_off - QPEL_EXTRA_BEFORE,
1573  pic_width, pic_height);
1574  src = lc->edge_emu_buffer + buf_offset;
1575  srcstride = edge_emu_stride;
1576  }
1577 
1578  if (!weight_flag)
1579  s->hevcdsp.put_hevc_qpel_uni[idx][!!my][!!mx](dst, dststride, src, srcstride,
1580  block_h, mx, my, block_w);
1581  else
1582  s->hevcdsp.put_hevc_qpel_uni_w[idx][!!my][!!mx](dst, dststride, src, srcstride,
1583  block_h, s->sh.luma_log2_weight_denom,
1584  luma_weight, luma_offset, mx, my, block_w);
1585 }
1586 
1587 /**
1588  * 8.5.3.2.2.1 Luma sample bidirectional interpolation process
1589  *
1590  * @param s HEVC decoding context
1591  * @param dst target buffer for block data at block position
1592  * @param dststride stride of the dst buffer
1593  * @param ref0 reference picture0 buffer at origin (0, 0)
1594  * @param mv0 motion vector0 (relative to block position) to get pixel data from
1595  * @param x_off horizontal position of block from origin (0, 0)
1596  * @param y_off vertical position of block from origin (0, 0)
1597  * @param block_w width of block
1598  * @param block_h height of block
1599  * @param ref1 reference picture1 buffer at origin (0, 0)
1600  * @param mv1 motion vector1 (relative to block position) to get pixel data from
1601  * @param current_mv current motion vector structure
1602  */
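/* The L0 prediction is computed first into lc->tmp with put_hevc_qpel; the
 * put_hevc_qpel_bi (or the weighted _bi_w variant) call then combines it with
 * the L1 prediction while writing the result to dst. */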
1603  static void luma_mc_bi(HEVCLocalContext *lc, uint8_t *dst, ptrdiff_t dststride,
1604  const AVFrame *ref0, const Mv *mv0, int x_off, int y_off,
1605  int block_w, int block_h, const AVFrame *ref1,
1606  const Mv *mv1, struct MvField *current_mv)
1607 {
1608  const HEVCContext *const s = lc->parent;
1609  ptrdiff_t src0stride = ref0->linesize[0];
1610  ptrdiff_t src1stride = ref1->linesize[0];
1611  int pic_width = s->ps.sps->width;
1612  int pic_height = s->ps.sps->height;
1613  int mx0 = mv0->x & 3;
1614  int my0 = mv0->y & 3;
1615  int mx1 = mv1->x & 3;
1616  int my1 = mv1->y & 3;
1617  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1618  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1619  int x_off0 = x_off + (mv0->x >> 2);
1620  int y_off0 = y_off + (mv0->y >> 2);
1621  int x_off1 = x_off + (mv1->x >> 2);
1622  int y_off1 = y_off + (mv1->y >> 2);
1623  int idx = hevc_pel_weight[block_w];
1624 
1625  const uint8_t *src0 = ref0->data[0] + y_off0 * src0stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1626  const uint8_t *src1 = ref1->data[0] + y_off1 * src1stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1627 
1628  if (x_off0 < QPEL_EXTRA_BEFORE || y_off0 < QPEL_EXTRA_AFTER ||
1629  x_off0 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1630  y_off0 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1631  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1632  int offset = QPEL_EXTRA_BEFORE * src0stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1633  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1634 
1635  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset,
1636  edge_emu_stride, src0stride,
1637  block_w + QPEL_EXTRA,
1638  block_h + QPEL_EXTRA,
1639  x_off0 - QPEL_EXTRA_BEFORE, y_off0 - QPEL_EXTRA_BEFORE,
1640  pic_width, pic_height);
1641  src0 = lc->edge_emu_buffer + buf_offset;
1642  src0stride = edge_emu_stride;
1643  }
1644 
1645  if (x_off1 < QPEL_EXTRA_BEFORE || y_off1 < QPEL_EXTRA_AFTER ||
1646  x_off1 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1647  y_off1 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1648  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1649  int offset = QPEL_EXTRA_BEFORE * src1stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1650  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1651 
1652  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src1 - offset,
1653  edge_emu_stride, src1stride,
1654  block_w + QPEL_EXTRA,
1655  block_h + QPEL_EXTRA,
1656  x_off1 - QPEL_EXTRA_BEFORE, y_off1 - QPEL_EXTRA_BEFORE,
1657  pic_width, pic_height);
1658  src1 = lc->edge_emu_buffer2 + buf_offset;
1659  src1stride = edge_emu_stride;
1660  }
1661 
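 /* Two-stage bi-prediction: list 0 is interpolated into the 16-bit
  * intermediate buffer lc->tmp, then the list-1 call combines it with
  * its own interpolation, either averaging the two or applying the
  * explicit weights and offsets from the slice header. */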
1662  s->hevcdsp.put_hevc_qpel[idx][!!my0][!!mx0](lc->tmp, src0, src0stride,
1663  block_h, mx0, my0, block_w);
1664  if (!weight_flag)
1665  s->hevcdsp.put_hevc_qpel_bi[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1666  block_h, mx1, my1, block_w);
1667  else
1668  s->hevcdsp.put_hevc_qpel_bi_w[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1669  block_h, s->sh.luma_log2_weight_denom,
1670  s->sh.luma_weight_l0[current_mv->ref_idx[0]],
1671  s->sh.luma_weight_l1[current_mv->ref_idx[1]],
1672  s->sh.luma_offset_l0[current_mv->ref_idx[0]],
1673  s->sh.luma_offset_l1[current_mv->ref_idx[1]],
1674  mx1, my1, block_w);
1675 
1676 }
1677 
1678 /**
1679  * 8.5.3.2.2.2 Chroma sample uniprediction interpolation process
1680  *
1681  * @param lc HEVC local (per-thread) decoding context
1682  * @param dst0 target buffer for block data at block position (one chroma plane)
1683  * @param dststride stride of the dst0 buffer
1684  * @param src0 reference chroma plane buffer at origin (0, 0)
1685  * @param srcstride stride of the src0 buffer
1686  * @param reflist reference picture list index (0 or 1) selecting the MV in current_mv
1687  * @param x_off horizontal position of block from origin (0, 0)
1688  * @param y_off vertical position of block from origin (0, 0)
1689  * @param block_w width of block
1690  * @param block_h height of block
1691  * @param chroma_weight weighting factor applied to the chroma prediction
1692  * @param chroma_offset additive offset applied to the chroma prediction value
1693  */
1694 
1695 static void chroma_mc_uni(HEVCLocalContext *lc, uint8_t *dst0,
1696  ptrdiff_t dststride, const uint8_t *src0, ptrdiff_t srcstride, int reflist,
1697  int x_off, int y_off, int block_w, int block_h,
1698  const struct MvField *current_mv, int chroma_weight, int chroma_offset)
1699 {
1700  const HEVCContext *const s = lc->parent;
1701  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1702  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1703  const Mv *mv = &current_mv->mv[reflist];
1704  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1705  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1706  int idx = hevc_pel_weight[block_w];
1707  int hshift = s->ps.sps->hshift[1];
1708  int vshift = s->ps.sps->vshift[1];
1709  intptr_t mx = av_mod_uintp2(mv->x, 2 + hshift);
1710  intptr_t my = av_mod_uintp2(mv->y, 2 + vshift);
1711  intptr_t _mx = mx << (1 - hshift);
1712  intptr_t _my = my << (1 - vshift);
1713  int emu = src0 == s->frame->data[1] || src0 == s->frame->data[2];
1714 
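 /* mx/my keep the fractional part of the chroma motion vector for the
  * given subsampling; _mx/_my rescale it to the eighth-sample step
  * expected by the EPEL filter tables. */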
1715  x_off += mv->x >> (2 + hshift);
1716  y_off += mv->y >> (2 + vshift);
1717  src0 += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1718 
1719  if (x_off < EPEL_EXTRA_BEFORE || y_off < EPEL_EXTRA_AFTER ||
1720  x_off >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1721  y_off >= pic_height - block_h - EPEL_EXTRA_AFTER ||
1722  emu) {
1723  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1724  int offset0 = EPEL_EXTRA_BEFORE * (srcstride + (1 << s->ps.sps->pixel_shift));
1725  int buf_offset0 = EPEL_EXTRA_BEFORE *
1726  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1727  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset0,
1728  edge_emu_stride, srcstride,
1729  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1730  x_off - EPEL_EXTRA_BEFORE,
1731  y_off - EPEL_EXTRA_BEFORE,
1732  pic_width, pic_height);
1733 
1734  src0 = lc->edge_emu_buffer + buf_offset0;
1735  srcstride = edge_emu_stride;
1736  }
1737  if (!weight_flag)
1738  s->hevcdsp.put_hevc_epel_uni[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1739  block_h, _mx, _my, block_w);
1740  else
1741  s->hevcdsp.put_hevc_epel_uni_w[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1742  block_h, s->sh.chroma_log2_weight_denom,
1743  chroma_weight, chroma_offset, _mx, _my, block_w);
1744 }
1745 
1746 /**
1747  * 8.5.3.2.2.2 Chroma sample bidirectional interpolation process
1748  *
1749  * @param lc HEVC local (per-thread) decoding context
1750  * @param dst0 target buffer for block data at block position
1751  * @param dststride stride of the dst0 buffer
1752  * @param ref0 reference picture0 buffer at origin (0, 0)
1753  * @param ref1 reference picture1 buffer at origin (0, 0)
1754  * @param x_off horizontal position of block from origin (0, 0)
1755  * @param y_off vertical position of block from origin (0, 0)
1756  * @param block_w width of block
1757  * @param block_h height of block
1758  * @param current_mv current motion vector structure; mv[0] and mv[1]
1759  * (relative to the block position) select the pixel data fetched
1760  * from ref0 and ref1
1761  * @param cidx chroma component index (0 for cb, 1 for cr)
1762  */
1763 static void chroma_mc_bi(HEVCLocalContext *lc, uint8_t *dst0, ptrdiff_t dststride,
1764  const AVFrame *ref0, const AVFrame *ref1,
1765  int x_off, int y_off, int block_w, int block_h, const MvField *current_mv, int cidx)
1766 {
1767  const HEVCContext *const s = lc->parent;
1768  const uint8_t *src1 = ref0->data[cidx+1];
1769  const uint8_t *src2 = ref1->data[cidx+1];
1770  ptrdiff_t src1stride = ref0->linesize[cidx+1];
1771  ptrdiff_t src2stride = ref1->linesize[cidx+1];
1772  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1773  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1774  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1775  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1776  const Mv *const mv0 = &current_mv->mv[0];
1777  const Mv *const mv1 = &current_mv->mv[1];
1778  int hshift = s->ps.sps->hshift[1];
1779  int vshift = s->ps.sps->vshift[1];
1780 
1781  intptr_t mx0 = av_mod_uintp2(mv0->x, 2 + hshift);
1782  intptr_t my0 = av_mod_uintp2(mv0->y, 2 + vshift);
1783  intptr_t mx1 = av_mod_uintp2(mv1->x, 2 + hshift);
1784  intptr_t my1 = av_mod_uintp2(mv1->y, 2 + vshift);
1785  intptr_t _mx0 = mx0 << (1 - hshift);
1786  intptr_t _my0 = my0 << (1 - vshift);
1787  intptr_t _mx1 = mx1 << (1 - hshift);
1788  intptr_t _my1 = my1 << (1 - vshift);
1789 
1790  int x_off0 = x_off + (mv0->x >> (2 + hshift));
1791  int y_off0 = y_off + (mv0->y >> (2 + vshift));
1792  int x_off1 = x_off + (mv1->x >> (2 + hshift));
1793  int y_off1 = y_off + (mv1->y >> (2 + vshift));
1794  int idx = hevc_pel_weight[block_w];
1795  src1 += y_off0 * src1stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1796  src2 += y_off1 * src2stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1797 
1798  if (x_off0 < EPEL_EXTRA_BEFORE || y_off0 < EPEL_EXTRA_AFTER ||
1799  x_off0 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1800  y_off0 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1801  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1802  int offset1 = EPEL_EXTRA_BEFORE * (src1stride + (1 << s->ps.sps->pixel_shift));
1803  int buf_offset1 = EPEL_EXTRA_BEFORE *
1804  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1805 
1806  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src1 - offset1,
1807  edge_emu_stride, src1stride,
1808  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1809  x_off0 - EPEL_EXTRA_BEFORE,
1810  y_off0 - EPEL_EXTRA_BEFORE,
1811  pic_width, pic_height);
1812 
1813  src1 = lc->edge_emu_buffer + buf_offset1;
1814  src1stride = edge_emu_stride;
1815  }
1816 
1817  if (x_off1 < EPEL_EXTRA_BEFORE || y_off1 < EPEL_EXTRA_AFTER ||
1818  x_off1 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1819  y_off1 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1820  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1821  int offset1 = EPEL_EXTRA_BEFORE * (src2stride + (1 << s->ps.sps->pixel_shift));
1822  int buf_offset1 = EPEL_EXTRA_BEFORE *
1823  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1824 
1825  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src2 - offset1,
1826  edge_emu_stride, src2stride,
1827  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1828  x_off1 - EPEL_EXTRA_BEFORE,
1829  y_off1 - EPEL_EXTRA_BEFORE,
1830  pic_width, pic_height);
1831 
1832  src2 = lc->edge_emu_buffer2 + buf_offset1;
1833  src2stride = edge_emu_stride;
1834  }
1835 
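 /* Same two-stage scheme as for luma: list 0 goes through lc->tmp and
  * the list-1 call writes the final bi-predicted chroma block, with
  * optional explicit weighting per component (cidx). */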
1836  s->hevcdsp.put_hevc_epel[idx][!!my0][!!mx0](lc->tmp, src1, src1stride,
1837  block_h, _mx0, _my0, block_w);
1838  if (!weight_flag)
1839  s->hevcdsp.put_hevc_epel_bi[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1840  src2, src2stride, lc->tmp,
1841  block_h, _mx1, _my1, block_w);
1842  else
1843  s->hevcdsp.put_hevc_epel_bi_w[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1844  src2, src2stride, lc->tmp,
1845  block_h,
1846  s->sh.chroma_log2_weight_denom,
1847  s->sh.chroma_weight_l0[current_mv->ref_idx[0]][cidx],
1848  s->sh.chroma_weight_l1[current_mv->ref_idx[1]][cidx],
1849  s->sh.chroma_offset_l0[current_mv->ref_idx[0]][cidx],
1850  s->sh.chroma_offset_l1[current_mv->ref_idx[1]][cidx],
1851  _mx1, _my1, block_w);
1852 }
1853 
1854 static void hevc_await_progress(const HEVCContext *s, const HEVCFrame *ref,
1855  const Mv *mv, int y0, int height)
1856 {
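 /* With frame threading, block until the reference frame has decoded
  * past the rows this block will read during motion compensation. */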
1857  if (s->threads_type == FF_THREAD_FRAME ) {
1858  int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9);
1859 
1860  ff_thread_await_progress(&ref->tf, y, 0);
1861  }
1862 }
1863 
1864 static void hevc_luma_mv_mvp_mode(HEVCLocalContext *lc, int x0, int y0, int nPbW,
1865  int nPbH, int log2_cb_size, int part_idx,
1866  int merge_idx, MvField *mv)
1867 {
1868  const HEVCContext *const s = lc->parent;
1869  enum InterPredIdc inter_pred_idc = PRED_L0;
1870  int mvp_flag;
1871 
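 /* AMVP (non-merge) inter prediction: for each list in use, decode the
  * reference index, the motion vector difference and the predictor
  * flag, then add the MVD to the selected predictor. */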
1872  ff_hevc_set_neighbour_available(lc, x0, y0, nPbW, nPbH);
1873  mv->pred_flag = 0;
1874  if (s->sh.slice_type == HEVC_SLICE_B)
1875  inter_pred_idc = ff_hevc_inter_pred_idc_decode(lc, nPbW, nPbH);
1876 
1877  if (inter_pred_idc != PRED_L1) {
1878  if (s->sh.nb_refs[L0])
1879  mv->ref_idx[0]= ff_hevc_ref_idx_lx_decode(lc, s->sh.nb_refs[L0]);
1880 
1881  mv->pred_flag = PF_L0;
1882  ff_hevc_hls_mvd_coding(lc, x0, y0, 0);
1883  mvp_flag = ff_hevc_mvp_lx_flag_decode(lc);
1884  ff_hevc_luma_mv_mvp_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1885  part_idx, merge_idx, mv, mvp_flag, 0);
1886  mv->mv[0].x += lc->pu.mvd.x;
1887  mv->mv[0].y += lc->pu.mvd.y;
1888  }
1889 
1890  if (inter_pred_idc != PRED_L0) {
1891  if (s->sh.nb_refs[L1])
1892  mv->ref_idx[1]= ff_hevc_ref_idx_lx_decode(lc, s->sh.nb_refs[L1]);
1893 
1894  if (s->sh.mvd_l1_zero_flag == 1 && inter_pred_idc == PRED_BI) {
1895  AV_ZERO32(&lc->pu.mvd);
1896  } else {
1897  ff_hevc_hls_mvd_coding(lc, x0, y0, 1);
1898  }
1899 
1900  mv->pred_flag += PF_L1;
1901  mvp_flag = ff_hevc_mvp_lx_flag_decode(lc);
1902  ff_hevc_luma_mv_mvp_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1903  part_idx, merge_idx, mv, mvp_flag, 1);
1904  mv->mv[1].x += lc->pu.mvd.x;
1905  mv->mv[1].y += lc->pu.mvd.y;
1906  }
1907 }
1908 
1909 static void hls_prediction_unit(HEVCLocalContext *lc, int x0, int y0,
1910  int nPbW, int nPbH,
1911  int log2_cb_size, int partIdx, int idx)
1912 {
1913 #define POS(c_idx, x, y) \
1914  &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \
1915  (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)]
1916  const HEVCContext *const s = lc->parent;
1917  int merge_idx = 0;
1918  struct MvField current_mv = {{{ 0 }}};
1919 
1920  int min_pu_width = s->ps.sps->min_pu_width;
1921 
1922  MvField *tab_mvf = s->ref->tab_mvf;
1923  const RefPicList *refPicList = s->ref->refPicList;
1924  const HEVCFrame *ref0 = NULL, *ref1 = NULL;
1925  uint8_t *dst0 = POS(0, x0, y0);
1926  uint8_t *dst1 = POS(1, x0, y0);
1927  uint8_t *dst2 = POS(2, x0, y0);
1928  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
1929  int min_cb_width = s->ps.sps->min_cb_width;
1930  int x_cb = x0 >> log2_min_cb_size;
1931  int y_cb = y0 >> log2_min_cb_size;
1932  int x_pu, y_pu;
1933  int i, j;
1934 
1935  int skip_flag = SAMPLE_CTB(s->skip_flag, x_cb, y_cb);
1936 
1937  if (!skip_flag)
1938  lc->pu.merge_flag = ff_hevc_merge_flag_decode(lc);
1939 
1940  if (skip_flag || lc->pu.merge_flag) {
1941  if (s->sh.max_num_merge_cand > 1)
1942  merge_idx = ff_hevc_merge_idx_decode(lc);
1943  else
1944  merge_idx = 0;
1945 
1946  ff_hevc_luma_mv_merge_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1947  partIdx, merge_idx, &current_mv);
1948  } else {
1949  hevc_luma_mv_mvp_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1950  partIdx, merge_idx, &current_mv);
1951  }
1952 
1953  x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1954  y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1955 
1956  for (j = 0; j < nPbH >> s->ps.sps->log2_min_pu_size; j++)
1957  for (i = 0; i < nPbW >> s->ps.sps->log2_min_pu_size; i++)
1958  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;
1959 
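 /* The motion field for this PU is final: look up the reference frames
  * and, under frame threading, wait until the rows needed for motion
  * compensation have been decoded in each of them. */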
1960  if (current_mv.pred_flag & PF_L0) {
1961  ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
1962  if (!ref0 || !ref0->frame->data[0])
1963  return;
1964  hevc_await_progress(s, ref0, &current_mv.mv[0], y0, nPbH);
1965  }
1966  if (current_mv.pred_flag & PF_L1) {
1967  ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
1968  if (!ref1 || !ref1->frame->data[0])
1969  return;
1970  hevc_await_progress(s, ref1, &current_mv.mv[1], y0, nPbH);
1971  }
1972 
1973  if (current_mv.pred_flag == PF_L0) {
1974  int x0_c = x0 >> s->ps.sps->hshift[1];
1975  int y0_c = y0 >> s->ps.sps->vshift[1];
1976  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1977  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1978 
1979  luma_mc_uni(lc, dst0, s->frame->linesize[0], ref0->frame,
1980  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1981  s->sh.luma_weight_l0[current_mv.ref_idx[0]],
1982  s->sh.luma_offset_l0[current_mv.ref_idx[0]]);
1983 
1984  if (s->ps.sps->chroma_format_idc) {
1985  chroma_mc_uni(lc, dst1, s->frame->linesize[1], ref0->frame->data[1], ref0->frame->linesize[1],
1986  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1987  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0]);
1988  chroma_mc_uni(lc, dst2, s->frame->linesize[2], ref0->frame->data[2], ref0->frame->linesize[2],
1989  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1990  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1]);
1991  }
1992  } else if (current_mv.pred_flag == PF_L1) {
1993  int x0_c = x0 >> s->ps.sps->hshift[1];
1994  int y0_c = y0 >> s->ps.sps->vshift[1];
1995  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1996  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1997 
1998  luma_mc_uni(lc, dst0, s->frame->linesize[0], ref1->frame,
1999  &current_mv.mv[1], x0, y0, nPbW, nPbH,
2000  s->sh.luma_weight_l1[current_mv.ref_idx[1]],
2001  s->sh.luma_offset_l1[current_mv.ref_idx[1]]);
2002 
2003  if (s->ps.sps->chroma_format_idc) {
2004  chroma_mc_uni(lc, dst1, s->frame->linesize[1], ref1->frame->data[1], ref1->frame->linesize[1],
2005  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
2006  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0]);
2007 
2008  chroma_mc_uni(lc, dst2, s->frame->linesize[2], ref1->frame->data[2], ref1->frame->linesize[2],
2009  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
2010  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1]);
2011  }
2012  } else if (current_mv.pred_flag == PF_BI) {
2013  int x0_c = x0 >> s->ps.sps->hshift[1];
2014  int y0_c = y0 >> s->ps.sps->vshift[1];
2015  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
2016  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
2017 
2018  luma_mc_bi(lc, dst0, s->frame->linesize[0], ref0->frame,
2019  &current_mv.mv[0], x0, y0, nPbW, nPbH,
2020  ref1->frame, &current_mv.mv[1], &current_mv);
2021 
2022  if (s->ps.sps->chroma_format_idc) {
2023  chroma_mc_bi(lc, dst1, s->frame->linesize[1], ref0->frame, ref1->frame,
2024  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 0);
2025 
2026  chroma_mc_bi(lc, dst2, s->frame->linesize[2], ref0->frame, ref1->frame,
2027  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 1);
2028  }
2029  }
2030 }
2031 
2032 /**
2033  * 8.4.1
2034  */
2035 static int luma_intra_pred_mode(HEVCLocalContext *lc, int x0, int y0, int pu_size,
2036  int prev_intra_luma_pred_flag)
2037 {
2038  const HEVCContext *const s = lc->parent;
2039  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
2040  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
2041  int min_pu_width = s->ps.sps->min_pu_width;
2042  int size_in_pus = pu_size >> s->ps.sps->log2_min_pu_size;
2043  int x0b = av_mod_uintp2(x0, s->ps.sps->log2_ctb_size);
2044  int y0b = av_mod_uintp2(y0, s->ps.sps->log2_ctb_size);
2045 
2046  int cand_up = (lc->ctb_up_flag || y0b) ?
2047  s->tab_ipm[(y_pu - 1) * min_pu_width + x_pu] : INTRA_DC;
2048  int cand_left = (lc->ctb_left_flag || x0b) ?
2049  s->tab_ipm[y_pu * min_pu_width + x_pu - 1] : INTRA_DC;
2050 
2051  int y_ctb = (y0 >> (s->ps.sps->log2_ctb_size)) << (s->ps.sps->log2_ctb_size);
2052 
2053  MvField *tab_mvf = s->ref->tab_mvf;
2054  int intra_pred_mode;
2055  int candidate[3];
2056  int i, j;
2057 
2058  // intra_pred_mode prediction does not cross vertical CTB boundaries
2059  if ((y0 - 1) < y_ctb)
2060  cand_up = INTRA_DC;
2061 
2062  if (cand_left == cand_up) {
2063  if (cand_left < 2) {
2064  candidate[0] = INTRA_PLANAR;
2065  candidate[1] = INTRA_DC;
2066  candidate[2] = INTRA_ANGULAR_26;
2067  } else {
2068  candidate[0] = cand_left;
2069  candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
2070  candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
2071  }
2072  } else {
2073  candidate[0] = cand_left;
2074  candidate[1] = cand_up;
2075  if (candidate[0] != INTRA_PLANAR && candidate[1] != INTRA_PLANAR) {
2076  candidate[2] = INTRA_PLANAR;
2077  } else if (candidate[0] != INTRA_DC && candidate[1] != INTRA_DC) {
2078  candidate[2] = INTRA_DC;
2079  } else {
2080  candidate[2] = INTRA_ANGULAR_26;
2081  }
2082  }
2083 
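 /* Either pick one of the three most-probable modes directly, or decode
  * the remaining mode and remap it past the (sorted) MPM candidates. */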
2084  if (prev_intra_luma_pred_flag) {
2085  intra_pred_mode = candidate[lc->pu.mpm_idx];
2086  } else {
2087  if (candidate[0] > candidate[1])
2088  FFSWAP(uint8_t, candidate[0], candidate[1]);
2089  if (candidate[0] > candidate[2])
2090  FFSWAP(uint8_t, candidate[0], candidate[2]);
2091  if (candidate[1] > candidate[2])
2092  FFSWAP(uint8_t, candidate[1], candidate[2]);
2093 
2094  intra_pred_mode = lc->pu.rem_intra_luma_pred_mode;
2095  for (i = 0; i < 3; i++)
2096  if (intra_pred_mode >= candidate[i])
2097  intra_pred_mode++;
2098  }
2099 
2100  /* write the intra prediction units into the mv array */
2101  if (!size_in_pus)
2102  size_in_pus = 1;
2103  for (i = 0; i < size_in_pus; i++) {
2104  memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
2105  intra_pred_mode, size_in_pus);
2106 
2107  for (j = 0; j < size_in_pus; j++) {
2108  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].pred_flag = PF_INTRA;
2109  }
2110  }
2111 
2112  return intra_pred_mode;
2113 }
2114 
2115 static av_always_inline void set_ct_depth(const HEVCContext *s, int x0, int y0,
2116  int log2_cb_size, int ct_depth)
2117 {
2118  int length = (1 << log2_cb_size) >> s->ps.sps->log2_min_cb_size;
2119  int x_cb = x0 >> s->ps.sps->log2_min_cb_size;
2120  int y_cb = y0 >> s->ps.sps->log2_min_cb_size;
2121  int y;
2122 
2123  for (y = 0; y < length; y++)
2124  memset(&s->tab_ct_depth[(y_cb + y) * s->ps.sps->min_cb_width + x_cb],
2125  ct_depth, length);
2126 }
2127 
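/* For 4:2:2 content the chroma intra mode derived from luma is remapped
 * through this table before prediction. */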
2128 static const uint8_t tab_mode_idx[] = {
2129  0, 1, 2, 2, 2, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 19, 20,
2130  21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31};
2131 
2132 static void intra_prediction_unit(HEVCLocalContext *lc, int x0, int y0,
2133  int log2_cb_size)
2134 {
2135  const HEVCContext *const s = lc->parent;
2136  static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
2137  uint8_t prev_intra_luma_pred_flag[4];
2138  int split = lc->cu.part_mode == PART_NxN;
2139  int pb_size = (1 << log2_cb_size) >> split;
2140  int side = split + 1;
2141  int chroma_mode;
2142  int i, j;
2143 
2144  for (i = 0; i < side; i++)
2145  for (j = 0; j < side; j++)
2146  prev_intra_luma_pred_flag[2 * i + j] = ff_hevc_prev_intra_luma_pred_flag_decode(lc);
2147 
2148  for (i = 0; i < side; i++) {
2149  for (j = 0; j < side; j++) {
2150  if (prev_intra_luma_pred_flag[2 * i + j])
2151  lc->pu.mpm_idx = ff_hevc_mpm_idx_decode(lc);
2152  else
2153  lc->pu.rem_intra_luma_pred_mode = ff_hevc_rem_intra_luma_pred_mode_decode(lc);
2154 
2155  lc->pu.intra_pred_mode[2 * i + j] =
2156  luma_intra_pred_mode(lc, x0 + pb_size * j, y0 + pb_size * i, pb_size,
2157  prev_intra_luma_pred_flag[2 * i + j]);
2158  }
2159  }
2160 
2161  if (s->ps.sps->chroma_format_idc == 3) {
2162  for (i = 0; i < side; i++) {
2163  for (j = 0; j < side; j++) {
2164  lc->pu.chroma_mode_c[2 * i + j] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(lc);
2165  if (chroma_mode != 4) {
2166  if (lc->pu.intra_pred_mode[2 * i + j] == intra_chroma_table[chroma_mode])
2167  lc->pu.intra_pred_mode_c[2 * i + j] = 34;
2168  else
2169  lc->pu.intra_pred_mode_c[2 * i + j] = intra_chroma_table[chroma_mode];
2170  } else {
2171  lc->pu.intra_pred_mode_c[2 * i + j] = lc->pu.intra_pred_mode[2 * i + j];
2172  }
2173  }
2174  }
2175  } else if (s->ps.sps->chroma_format_idc == 2) {
2176  int mode_idx;
2177  lc->pu.chroma_mode_c[0] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(lc);
2178  if (chroma_mode != 4) {
2179  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2180  mode_idx = 34;
2181  else
2182  mode_idx = intra_chroma_table[chroma_mode];
2183  } else {
2184  mode_idx = lc->pu.intra_pred_mode[0];
2185  }
2186  lc->pu.intra_pred_mode_c[0] = tab_mode_idx[mode_idx];
2187  } else if (s->ps.sps->chroma_format_idc != 0) {
2188  chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(lc);
2189  if (chroma_mode != 4) {
2190  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2191  lc->pu.intra_pred_mode_c[0] = 34;
2192  else
2193  lc->pu.intra_pred_mode_c[0] = intra_chroma_table[chroma_mode];
2194  } else {
2195  lc->pu.intra_pred_mode_c[0] = lc->pu.intra_pred_mode[0];
2196  }
2197  }
2198 }
2199 
2200 static void intra_prediction_unit_default_value(HEVCLocalContext *lc,
2201  int x0, int y0,
2202  int log2_cb_size)
2203 {
2204  const HEVCContext *const s = lc->parent;
2205  int pb_size = 1 << log2_cb_size;
2206  int size_in_pus = pb_size >> s->ps.sps->log2_min_pu_size;
2207  int min_pu_width = s->ps.sps->min_pu_width;
2208  MvField *tab_mvf = s->ref->tab_mvf;
2209  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
2210  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
2211  int j, k;
2212 
2213  if (size_in_pus == 0)
2214  size_in_pus = 1;
2215  for (j = 0; j < size_in_pus; j++)
2216  memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus);
2217  if (lc->cu.pred_mode == MODE_INTRA)
2218  for (j = 0; j < size_in_pus; j++)
2219  for (k = 0; k < size_in_pus; k++)
2220  tab_mvf[(y_pu + j) * min_pu_width + x_pu + k].pred_flag = PF_INTRA;
2221 }
2222 
2223 static int hls_coding_unit(HEVCLocalContext *lc, const HEVCContext *s, int x0, int y0, int log2_cb_size)
2224 {
2225  int cb_size = 1 << log2_cb_size;
2226  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
2227  int length = cb_size >> log2_min_cb_size;
2228  int min_cb_width = s->ps.sps->min_cb_width;
2229  int x_cb = x0 >> log2_min_cb_size;
2230  int y_cb = y0 >> log2_min_cb_size;
2231  int idx = log2_cb_size - 2;
2232  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2233  int x, y, ret;
2234 
2235  lc->cu.x = x0;
2236  lc->cu.y = y0;
2237  lc->cu.pred_mode = MODE_INTRA;
2238  lc->cu.part_mode = PART_2Nx2N;
2239  lc->cu.intra_split_flag = 0;
2240 
2241  SAMPLE_CTB(s->skip_flag, x_cb, y_cb) = 0;
2242  for (x = 0; x < 4; x++)
2243  lc->pu.intra_pred_mode[x] = 1;
2244  if (s->ps.pps->transquant_bypass_enable_flag) {
2245  lc->cu.cu_transquant_bypass_flag = ff_hevc_cu_transquant_bypass_flag_decode(lc);
2246  if (lc->cu.cu_transquant_bypass_flag)
2247  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2248  } else
2249  lc->cu.cu_transquant_bypass_flag = 0;
2250 
2251  if (s->sh.slice_type != HEVC_SLICE_I) {
2252  uint8_t skip_flag = ff_hevc_skip_flag_decode(lc, x0, y0, x_cb, y_cb);
2253 
2254  x = y_cb * min_cb_width + x_cb;
2255  for (y = 0; y < length; y++) {
2256  memset(&s->skip_flag[x], skip_flag, length);
2257  x += min_cb_width;
2258  }
2259  lc->cu.pred_mode = skip_flag ? MODE_SKIP : MODE_INTER;
2260  } else {
2261  x = y_cb * min_cb_width + x_cb;
2262  for (y = 0; y < length; y++) {
2263  memset(&s->skip_flag[x], 0, length);
2264  x += min_cb_width;
2265  }
2266  }
2267 
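 /* Skipped CUs are a single 2Nx2N merge PU with no residual; otherwise
  * decode the prediction mode, partitioning, intra/inter PU data and,
  * unless PCM is used, the residual transform tree. */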
2268  if (SAMPLE_CTB(s->skip_flag, x_cb, y_cb)) {
2269  hls_prediction_unit(lc, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2270  intra_prediction_unit_default_value(lc, x0, y0, log2_cb_size);
2271 
2272  if (!s->sh.disable_deblocking_filter_flag)
2273  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_cb_size);
2274  } else {
2275  int pcm_flag = 0;
2276 
2277  if (s->sh.slice_type != HEVC_SLICE_I)
2278  lc->cu.pred_mode = ff_hevc_pred_mode_decode(lc);
2279  if (lc->cu.pred_mode != MODE_INTRA ||
2280  log2_cb_size == s->ps.sps->log2_min_cb_size) {
2281  lc->cu.part_mode = ff_hevc_part_mode_decode(lc, log2_cb_size);
2282  lc->cu.intra_split_flag = lc->cu.part_mode == PART_NxN &&
2283  lc->cu.pred_mode == MODE_INTRA;
2284  }
2285 
2286  if (lc->cu.pred_mode == MODE_INTRA) {
2287  if (lc->cu.part_mode == PART_2Nx2N && s->ps.sps->pcm_enabled_flag &&
2288  log2_cb_size >= s->ps.sps->pcm.log2_min_pcm_cb_size &&
2289  log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) {
2290  pcm_flag = ff_hevc_pcm_flag_decode(lc);
2291  }
2292  if (pcm_flag) {
2293  intra_prediction_unit_default_value(lc, x0, y0, log2_cb_size);
2294  ret = hls_pcm_sample(lc, x0, y0, log2_cb_size);
2295  if (s->ps.sps->pcm.loop_filter_disable_flag)
2296  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2297 
2298  if (ret < 0)
2299  return ret;
2300  } else {
2301  intra_prediction_unit(lc, x0, y0, log2_cb_size);
2302  }
2303  } else {
2304  intra_prediction_unit_default_value(lc, x0, y0, log2_cb_size);
2305  switch (lc->cu.part_mode) {
2306  case PART_2Nx2N:
2307  hls_prediction_unit(lc, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2308  break;
2309  case PART_2NxN:
2310  hls_prediction_unit(lc, x0, y0, cb_size, cb_size / 2, log2_cb_size, 0, idx);
2311  hls_prediction_unit(lc, x0, y0 + cb_size / 2, cb_size, cb_size / 2, log2_cb_size, 1, idx);
2312  break;
2313  case PART_Nx2N:
2314  hls_prediction_unit(lc, x0, y0, cb_size / 2, cb_size, log2_cb_size, 0, idx - 1);
2315  hls_prediction_unit(lc, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1, idx - 1);
2316  break;
2317  case PART_2NxnU:
2318  hls_prediction_unit(lc, x0, y0, cb_size, cb_size / 4, log2_cb_size, 0, idx);
2319  hls_prediction_unit(lc, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1, idx);
2320  break;
2321  case PART_2NxnD:
2322  hls_prediction_unit(lc, x0, y0, cb_size, cb_size * 3 / 4, log2_cb_size, 0, idx);
2323  hls_prediction_unit(lc, x0, y0 + cb_size * 3 / 4, cb_size, cb_size / 4, log2_cb_size, 1, idx);
2324  break;
2325  case PART_nLx2N:
2326  hls_prediction_unit(lc, x0, y0, cb_size / 4, cb_size, log2_cb_size, 0, idx - 2);
2327  hls_prediction_unit(lc, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2);
2328  break;
2329  case PART_nRx2N:
2330  hls_prediction_unit(lc, x0, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 0, idx - 2);
2331  hls_prediction_unit(lc, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1, idx - 2);
2332  break;
2333  case PART_NxN:
2334  hls_prediction_unit(lc, x0, y0, cb_size / 2, cb_size / 2, log2_cb_size, 0, idx - 1);
2335  hls_prediction_unit(lc, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1);
2336  hls_prediction_unit(lc, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1);
2337  hls_prediction_unit(lc, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1);
2338  break;
2339  }
2340  }
2341 
2342  if (!pcm_flag) {
2343  int rqt_root_cbf = 1;
2344 
2345  if (lc->cu.pred_mode != MODE_INTRA &&
2346  !(lc->cu.part_mode == PART_2Nx2N && lc->pu.merge_flag)) {
2347  rqt_root_cbf = ff_hevc_no_residual_syntax_flag_decode(lc);
2348  }
2349  if (rqt_root_cbf) {
2350  const static int cbf[2] = { 0 };
2351  lc->cu.max_trafo_depth = lc->cu.pred_mode == MODE_INTRA ?
2352  s->ps.sps->max_transform_hierarchy_depth_intra + lc->cu.intra_split_flag :
2353  s->ps.sps->max_transform_hierarchy_depth_inter;
2354  ret = hls_transform_tree(lc, x0, y0, x0, y0, x0, y0,
2355  log2_cb_size,
2356  log2_cb_size, 0, 0, cbf, cbf);
2357  if (ret < 0)
2358  return ret;
2359  } else {
2360  if (!s->sh.disable_deblocking_filter_flag)
2361  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_cb_size);
2362  }
2363  }
2364  }
2365 
2366  if (s->ps.pps->cu_qp_delta_enabled_flag && lc->tu.is_cu_qp_delta_coded == 0)
2367  ff_hevc_set_qPy(lc, x0, y0, log2_cb_size);
2368 
2369  x = y_cb * min_cb_width + x_cb;
2370  for (y = 0; y < length; y++) {
2371  memset(&s->qp_y_tab[x], lc->qp_y, length);
2372  x += min_cb_width;
2373  }
2374 
2375  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2376  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) {
2377  lc->qPy_pred = lc->qp_y;
2378  }
2379 
2380  set_ct_depth(s, x0, y0, log2_cb_size, lc->ct_depth);
2381 
2382  return 0;
2383 }
2384 
2385 static int hls_coding_quadtree(HEVCLocalContext *lc, int x0, int y0,
2386  int log2_cb_size, int cb_depth)
2387 {
2388  const HEVCContext *const s = lc->parent;
2389  const int cb_size = 1 << log2_cb_size;
2390  int ret;
2391  int split_cu;
2392 
2393  lc->ct_depth = cb_depth;
2394  if (x0 + cb_size <= s->ps.sps->width &&
2395  y0 + cb_size <= s->ps.sps->height &&
2396  log2_cb_size > s->ps.sps->log2_min_cb_size) {
2397  split_cu = ff_hevc_split_coding_unit_flag_decode(lc, cb_depth, x0, y0);
2398  } else {
2399  split_cu = (log2_cb_size > s->ps.sps->log2_min_cb_size);
2400  }
2401  if (s->ps.pps->cu_qp_delta_enabled_flag &&
2402  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth) {
2403  lc->tu.is_cu_qp_delta_coded = 0;
2404  lc->tu.cu_qp_delta = 0;
2405  }
2406 
2407  if (s->sh.cu_chroma_qp_offset_enabled_flag &&
2408  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_chroma_qp_offset_depth) {
2409  lc->tu.is_cu_chroma_qp_offset_coded = 0;
2410  }
2411 
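 /* Recurse into the four quadrants that lie at least partly inside the
  * picture; the return value indicates whether more CTU data follows in
  * the current slice segment. */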
2412  if (split_cu) {
2413  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2414  const int cb_size_split = cb_size >> 1;
2415  const int x1 = x0 + cb_size_split;
2416  const int y1 = y0 + cb_size_split;
2417 
2418  int more_data = 0;
2419 
2420  more_data = hls_coding_quadtree(lc, x0, y0, log2_cb_size - 1, cb_depth + 1);
2421  if (more_data < 0)
2422  return more_data;
2423 
2424  if (more_data && x1 < s->ps.sps->width) {
2425  more_data = hls_coding_quadtree(lc, x1, y0, log2_cb_size - 1, cb_depth + 1);
2426  if (more_data < 0)
2427  return more_data;
2428  }
2429  if (more_data && y1 < s->ps.sps->height) {
2430  more_data = hls_coding_quadtree(lc, x0, y1, log2_cb_size - 1, cb_depth + 1);
2431  if (more_data < 0)
2432  return more_data;
2433  }
2434  if (more_data && x1 < s->ps.sps->width &&
2435  y1 < s->ps.sps->height) {
2436  more_data = hls_coding_quadtree(lc, x1, y1, log2_cb_size - 1, cb_depth + 1);
2437  if (more_data < 0)
2438  return more_data;
2439  }
2440 
2441  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2442  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0)
2443  lc->qPy_pred = lc->qp_y;
2444 
2445  if (more_data)
2446  return ((x1 + cb_size_split) < s->ps.sps->width ||
2447  (y1 + cb_size_split) < s->ps.sps->height);
2448  else
2449  return 0;
2450  } else {
2451  ret = hls_coding_unit(lc, s, x0, y0, log2_cb_size);
2452  if (ret < 0)
2453  return ret;
2454  if ((!((x0 + cb_size) %
2455  (1 << (s->ps.sps->log2_ctb_size))) ||
2456  (x0 + cb_size >= s->ps.sps->width)) &&
2457  (!((y0 + cb_size) %
2458  (1 << (s->ps.sps->log2_ctb_size))) ||
2459  (y0 + cb_size >= s->ps.sps->height))) {
2460  int end_of_slice_flag = ff_hevc_end_of_slice_flag_decode(lc);
2461  return !end_of_slice_flag;
2462  } else {
2463  return 1;
2464  }
2465  }
2466 
2467  return 0;
2468 }
2469 
2470 static void hls_decode_neighbour(HEVCLocalContext *lc, int x_ctb, int y_ctb,
2471  int ctb_addr_ts)
2472 {
2473  const HEVCContext *const s = lc->parent;
2474  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2475  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2476  int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;
2477 
2478  s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr;
2479 
2480  if (s->ps.pps->entropy_coding_sync_enabled_flag) {
2481  if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
2482  lc->first_qp_group = 1;
2483  lc->end_of_tiles_x = s->ps.sps->width;
2484  } else if (s->ps.pps->tiles_enabled_flag) {
2485  if (ctb_addr_ts && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[ctb_addr_ts - 1]) {
2486  int idxX = s->ps.pps->col_idxX[x_ctb >> s->ps.sps->log2_ctb_size];
2487  lc->end_of_tiles_x = x_ctb + (s->ps.pps->column_width[idxX] << s->ps.sps->log2_ctb_size);
2488  lc->first_qp_group = 1;
2489  }
2490  } else {
2491  lc->end_of_tiles_x = s->ps.sps->width;
2492  }
2493 
2494  lc->end_of_tiles_y = FFMIN(y_ctb + ctb_size, s->ps.sps->height);
2495 
2496  lc->boundary_flags = 0;
2497  if (s->ps.pps->tiles_enabled_flag) {
2498  if (x_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]])
2499  lc->boundary_flags |= BOUNDARY_LEFT_TILE;
2500  if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1])
2501  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2502  if (y_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]])
2503  lc->boundary_flags |= BOUNDARY_UPPER_TILE;
2504  if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->ps.sps->ctb_width])
2505  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2506  } else {
2507  if (ctb_addr_in_slice <= 0)
2508  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2509  if (ctb_addr_in_slice < s->ps.sps->ctb_width)
2510  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2511  }
2512 
2513  lc->ctb_left_flag = ((x_ctb > 0) && (ctb_addr_in_slice > 0) && !(lc->boundary_flags & BOUNDARY_LEFT_TILE));
2514  lc->ctb_up_flag = ((y_ctb > 0) && (ctb_addr_in_slice >= s->ps.sps->ctb_width) && !(lc->boundary_flags & BOUNDARY_UPPER_TILE));
2515  lc->ctb_up_right_flag = ((y_ctb > 0) && (ctb_addr_in_slice+1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->ps.sps->ctb_width]]));
2516  lc->ctb_up_left_flag = ((x_ctb > 0) && (y_ctb > 0) && (ctb_addr_in_slice-1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->ps.sps->ctb_width]]));
2517 }
2518 
2519 static int hls_decode_entry(AVCodecContext *avctxt, void *arg)
2520 {
2521  HEVCContext *s = avctxt->priv_data;
2522  HEVCLocalContext *const lc = s->HEVClc;
2523  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2524  int more_data = 1;
2525  int x_ctb = 0;
2526  int y_ctb = 0;
2527  int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs];
2528  int ret;
2529 
2530  if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) {
2531  av_log(s->avctx, AV_LOG_ERROR, "Impossible initial tile.\n");
2532  return AVERROR_INVALIDDATA;
2533  }
2534 
2535  if (s->sh.dependent_slice_segment_flag) {
2536  int prev_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1];
2537  if (s->tab_slice_address[prev_rs] != s->sh.slice_addr) {
2538  av_log(s->avctx, AV_LOG_ERROR, "Previous slice segment missing\n");
2539  return AVERROR_INVALIDDATA;
2540  }
2541  }
2542 
2543  while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2544  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2545 
2546  x_ctb = (ctb_addr_rs % ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2547  y_ctb = (ctb_addr_rs / ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2548  hls_decode_neighbour(lc, x_ctb, y_ctb, ctb_addr_ts);
2549 
2550  ret = ff_hevc_cabac_init(lc, ctb_addr_ts);
2551  if (ret < 0) {
2552  s->tab_slice_address[ctb_addr_rs] = -1;
2553  return ret;
2554  }
2555 
2556  hls_sao_param(lc, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2557 
2558  s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset;
2559  s->deblock[ctb_addr_rs].tc_offset = s->sh.tc_offset;
2560  s->filter_slice_edges[ctb_addr_rs] = s->sh.slice_loop_filter_across_slices_enabled_flag;
2561 
2562  more_data = hls_coding_quadtree(lc, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2563  if (more_data < 0) {
2564  s->tab_slice_address[ctb_addr_rs] = -1;
2565  return more_data;
2566  }
2567 
2568 
2569  ctb_addr_ts++;
2570  ff_hevc_save_states(lc, ctb_addr_ts);
2571  ff_hevc_hls_filters(lc, x_ctb, y_ctb, ctb_size);
2572  }
2573 
2574  if (x_ctb + ctb_size >= s->ps.sps->width &&
2575  y_ctb + ctb_size >= s->ps.sps->height)
2576  ff_hevc_hls_filter(lc, x_ctb, y_ctb, ctb_size);
2577 
2578  return ctb_addr_ts;
2579 }
2580 
2581 static int hls_slice_data(HEVCContext *s)
2582 {
2583  int ret = 0;
2584 
2585  s->avctx->execute(s->avctx, hls_decode_entry, NULL, &ret , 1, 0);
2586  return ret;
2587 }
2588 static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *hevc_lclist,
2589  int job, int self_id)
2590 {
2591  HEVCLocalContext *lc = ((HEVCLocalContext**)hevc_lclist)[self_id];
2592  const HEVCContext *const s = lc->parent;
2593  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2594  int more_data = 1;
2595  int ctb_row = job;
2596  int ctb_addr_rs = s->sh.slice_ctb_addr_rs + ctb_row * ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size);
2597  int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
2598  int thread = ctb_row % s->threads_number;
2599  int ret;
2600 
2601  if(ctb_row) {
2602  ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);
2603  if (ret < 0)
2604  goto error;
2605  ff_init_cabac_decoder(&lc->cc, s->data + s->sh.offset[(ctb_row)-1], s->sh.size[ctb_row - 1]);
2606  }
2607 
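 /* Wavefront rows: every row after the first starts its bitreader and
  * CABAC engine at the entry point recorded in the slice header, and
  * waits on the row above before each CTU so that the neighbour data
  * and saved CABAC states it depends on are already available. */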
2608  while(more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2609  int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2610  int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2611 
2612  hls_decode_neighbour(lc, x_ctb, y_ctb, ctb_addr_ts);
2613 
2614  ff_thread_await_progress2(s->avctx, ctb_row, thread, SHIFT_CTB_WPP);
2615 
2616  /* atomic_load's prototype requires a pointer to non-const atomic variable
2617  * (due to implementations via mutexes, where reads involve writes).
2618  * Of course, casting const away here is nevertheless safe. */
2619  if (atomic_load((atomic_int*)&s->wpp_err)) {
2620  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2621  return 0;
2622  }
2623 
2624  ret = ff_hevc_cabac_init(lc, ctb_addr_ts);
2625  if (ret < 0)
2626  goto error;
2627  hls_sao_param(lc, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2628  more_data = hls_coding_quadtree(lc, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2629 
2630  if (more_data < 0) {
2631  ret = more_data;
2632  goto error;
2633  }
2634 
2635  ctb_addr_ts++;
2636 
2637  ff_hevc_save_states(lc, ctb_addr_ts);
2638  ff_thread_report_progress2(s->avctx, ctb_row, thread, 1);
2639  ff_hevc_hls_filters(lc, x_ctb, y_ctb, ctb_size);
2640 
2641  if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {
2642  /* Casting const away here is safe, because it is an atomic operation. */
2643  atomic_store((atomic_int*)&s->wpp_err, 1);
2644  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2645  return 0;
2646  }
2647 
2648  if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height ) {
2649  ff_hevc_hls_filter(lc, x_ctb, y_ctb, ctb_size);
2650  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2651  return ctb_addr_ts;
2652  }
2653  ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2654  x_ctb+=ctb_size;
2655 
2656  if(x_ctb >= s->ps.sps->width) {
2657  break;
2658  }
2659  }
2660  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2661 
2662  return 0;
2663 error:
2664  s->tab_slice_address[ctb_addr_rs] = -1;
2665  /* Casting const away here is safe, because it is an atomic operation. */
2666  atomic_store((atomic_int*)&s->wpp_err, 1);
2667  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2668  return ret;
2669 }
2670 
2671 static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
2672 {
2673  const uint8_t *data = nal->data;
2674  int length = nal->size;
2675  HEVCLocalContext *lc = s->HEVClc;
2676  int *ret;
2677  int64_t offset;
2678  int64_t startheader, cmpt = 0;
2679  int i, j, res = 0;
2680 
2681  if (s->sh.slice_ctb_addr_rs + s->sh.num_entry_point_offsets * s->ps.sps->ctb_width >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
2682  av_log(s->avctx, AV_LOG_ERROR, "WPP ctb addresses are wrong (%d %d %d %d)\n",
2683  s->sh.slice_ctb_addr_rs, s->sh.num_entry_point_offsets,
2684  s->ps.sps->ctb_width, s->ps.sps->ctb_height
2685  );
2686  return AVERROR_INVALIDDATA;
2687  }
2688 
2689  for (i = 1; i < s->threads_number; i++) {
2690  if (s->HEVClcList[i])
2691  continue;
2692  s->HEVClcList[i] = av_mallocz(sizeof(HEVCLocalContext));
2693  if (!s->HEVClcList[i])
2694  return AVERROR(ENOMEM);
2695  s->HEVClcList[i]->logctx = s->avctx;
2696  s->HEVClcList[i]->parent = s;
2697  s->HEVClcList[i]->common_cabac_state = &s->cabac;
2698  }
2699 
2700  offset = (lc->gb.index >> 3);
2701 
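 /* The entry point offsets signalled in the slice header still count
  * any emulation-prevention bytes; subtract those that fall into each
  * range (tracked in skipped_bytes_pos) to obtain offsets and sizes
  * valid for the unescaped buffer the wavefront rows will read. */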
2702  for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) {
2703  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2704  startheader--;
2705  cmpt++;
2706  }
2707  }
2708 
2709  for (i = 1; i < s->sh.num_entry_point_offsets; i++) {
2710  offset += (s->sh.entry_point_offset[i - 1] - cmpt);
2711  for (j = 0, cmpt = 0, startheader = offset
2712  + s->sh.entry_point_offset[i]; j < nal->skipped_bytes; j++) {
2713  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2714  startheader--;
2715  cmpt++;
2716  }
2717  }
2718  s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt;
2719  s->sh.offset[i - 1] = offset;
2720 
2721  }
2722  if (s->sh.num_entry_point_offsets != 0) {
2723  offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt;
2724  if (length < offset) {
2725  av_log(s->avctx, AV_LOG_ERROR, "entry_point_offset table is corrupted\n");
2726  return AVERROR_INVALIDDATA;
2727  }
2728  s->sh.size[s->sh.num_entry_point_offsets - 1] = length - offset;
2729  s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset;
2730 
2731  }
2732  s->data = data;
2733 
2734  for (i = 1; i < s->threads_number; i++) {
2735  s->HEVClcList[i]->first_qp_group = 1;
2736  s->HEVClcList[i]->qp_y = s->HEVClc->qp_y;
2737  }
2738 
2739  atomic_store(&s->wpp_err, 0);
2740  res = ff_slice_thread_allocz_entries(s->avctx, s->sh.num_entry_point_offsets + 1);
2741  if (res < 0)
2742  return res;
2743 
2744  ret = av_calloc(s->sh.num_entry_point_offsets + 1, sizeof(*ret));
2745  if (!ret)
2746  return AVERROR(ENOMEM);
2747 
2748  if (s->ps.pps->entropy_coding_sync_enabled_flag)
2749  s->avctx->execute2(s->avctx, hls_decode_entry_wpp, s->HEVClcList, ret, s->sh.num_entry_point_offsets + 1);
2750 
2751  for (i = 0; i <= s->sh.num_entry_point_offsets; i++)
2752  res += ret[i];
2753 
2754  av_free(ret);
2755  return res;
2756 }
2757 
2758 static int set_side_data(HEVCContext *s)
2759 {
2760  AVFrame *out = s->ref->frame;
2761  int ret;
2762 
2763  // Decrement the mastering display and content light level flag when IRAP
2764  // frame has no_rasl_output_flag=1 so the side data persists for the entire
2765  // coded video sequence.
2766  if (IS_IRAP(s) && s->no_rasl_output_flag) {
2767  if (s->sei.common.mastering_display.present > 0)
2768  s->sei.common.mastering_display.present--;
2769 
2770  if (s->sei.common.content_light.present > 0)
2771  s->sei.common.content_light.present--;
2772  }
2773 
2774  ret = ff_h2645_sei_to_frame(out, &s->sei.common, AV_CODEC_ID_HEVC, NULL,
2775  &s->ps.sps->vui.common,
2776  s->ps.sps->bit_depth, s->ps.sps->bit_depth_chroma,
2777  s->ref->poc /* no poc_offset in HEVC */);
2778  if (ret < 0)
2779  return ret;
2780 
2781  if (s->sei.timecode.present) {
2782  uint32_t *tc_sd;
2783  char tcbuf[AV_TIMECODE_STR_SIZE];
2784  AVFrameSideData *tcside = av_frame_new_side_data(out, AV_FRAME_DATA_S12M_TIMECODE,
2785  sizeof(uint32_t) * 4);
2786  if (!tcside)
2787  return AVERROR(ENOMEM);
2788 
2789  tc_sd = (uint32_t*)tcside->data;
2790  tc_sd[0] = s->sei.timecode.num_clock_ts;
2791 
2792  for (int i = 0; i < tc_sd[0]; i++) {
2793  int drop = s->sei.timecode.cnt_dropped_flag[i];
2794  int hh = s->sei.timecode.hours_value[i];
2795  int mm = s->sei.timecode.minutes_value[i];
2796  int ss = s->sei.timecode.seconds_value[i];
2797  int ff = s->sei.timecode.n_frames[i];
2798 
2799  tc_sd[i + 1] = av_timecode_get_smpte(s->avctx->framerate, drop, hh, mm, ss, ff);
2800  av_timecode_make_smpte_tc_string2(tcbuf, s->avctx->framerate, tc_sd[i + 1], 0, 0);
2801  av_dict_set(&out->metadata, "timecode", tcbuf, 0);
2802  }
2803 
2804  s->sei.timecode.num_clock_ts = 0;
2805  }
2806 
2807  if (s->sei.common.dynamic_hdr_plus.info) {
2808  AVBufferRef *info_ref = av_buffer_ref(s->sei.common.dynamic_hdr_plus.info);
2809  if (!info_ref)
2810  return AVERROR(ENOMEM);
2811 
2812  if (!av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DYNAMIC_HDR_PLUS, info_ref)) {
2813  av_buffer_unref(&info_ref);
2814  return AVERROR(ENOMEM);
2815  }
2816  }
2817 
2818  if (s->rpu_buf) {
2819  AVFrameSideData *rpu = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DOVI_RPU_BUFFER, s->rpu_buf);
2820  if (!rpu)
2821  return AVERROR(ENOMEM);
2822 
2823  s->rpu_buf = NULL;
2824  }
2825 
2826  if ((ret = ff_dovi_attach_side_data(&s->dovi_ctx, out)) < 0)
2827  return ret;
2828 
2829  if (s->sei.common.dynamic_hdr_vivid.info) {
2830  AVBufferRef *info_ref = av_buffer_ref(s->sei.common.dynamic_hdr_vivid.info);
2831  if (!info_ref)
2832  return AVERROR(ENOMEM);
2833 
2834  if (!av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DYNAMIC_HDR_VIVID, info_ref)) {
2835  av_buffer_unref(&info_ref);
2836  return AVERROR(ENOMEM);
2837  }
2838  }
2839 
2840  return 0;
2841 }
2842 
2843 static int hevc_frame_start(HEVCContext *s)
2844 {
2845  HEVCLocalContext *lc = s->HEVClc;
2846  int pic_size_in_ctb = ((s->ps.sps->width >> s->ps.sps->log2_min_cb_size) + 1) *
2847  ((s->ps.sps->height >> s->ps.sps->log2_min_cb_size) + 1);
2848  int ret;
2849 
2850  memset(s->horizontal_bs, 0, s->bs_width * s->bs_height);
2851  memset(s->vertical_bs, 0, s->bs_width * s->bs_height);
2852  memset(s->cbf_luma, 0, s->ps.sps->min_tb_width * s->ps.sps->min_tb_height);
2853  memset(s->is_pcm, 0, (s->ps.sps->min_pu_width + 1) * (s->ps.sps->min_pu_height + 1));
2854  memset(s->tab_slice_address, -1, pic_size_in_ctb * sizeof(*s->tab_slice_address));
2855 
2856  s->is_decoded = 0;
2857  s->first_nal_type = s->nal_unit_type;
2858 
2859  s->no_rasl_output_flag = IS_IDR(s) || IS_BLA(s) || (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos);
2860 
2861  if (s->ps.pps->tiles_enabled_flag)
2862  lc->end_of_tiles_x = s->ps.pps->column_width[0] << s->ps.sps->log2_ctb_size;
2863 
2864  ret = ff_hevc_set_new_ref(s, &s->frame, s->poc);
2865  if (ret < 0)
2866  goto fail;
2867 
2868  ret = ff_hevc_frame_rps(s);
2869  if (ret < 0) {
2870  av_log(s->avctx, AV_LOG_ERROR, "Error constructing the frame RPS.\n");
2871  goto fail;
2872  }
2873 
2874  if (IS_IRAP(s))
2875  s->ref->frame->flags |= AV_FRAME_FLAG_KEY;
2876  else
2877  s->ref->frame->flags &= ~AV_FRAME_FLAG_KEY;
2878 
2879  s->ref->needs_fg = s->sei.common.film_grain_characteristics.present &&
2880  !(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) &&
2881  !s->avctx->hwaccel;
2882 
2883  if (s->ref->needs_fg &&
2884  !ff_h274_film_grain_params_supported(s->sei.common.film_grain_characteristics.model_id,
2885  s->ref->frame->format)) {
2886  av_log_once(s->avctx, AV_LOG_WARNING, AV_LOG_DEBUG, &s->film_grain_warning_shown,
2887  "Unsupported film grain parameters. Ignoring film grain.\n");
2888  s->ref->needs_fg = 0;
2889  }
2890 
2891  if (s->ref->needs_fg) {
2892  s->ref->frame_grain->format = s->ref->frame->format;
2893  s->ref->frame_grain->width = s->ref->frame->width;
2894  s->ref->frame_grain->height = s->ref->frame->height;
2895  if ((ret = ff_thread_get_buffer(s->avctx, s->ref->frame_grain, 0)) < 0)
2896  goto fail;
2897  }
2898 
2899  ret = set_side_data(s);
2900  if (ret < 0)
2901  goto fail;
2902 
2903  s->frame->pict_type = 3 - s->sh.slice_type;
2904 
2905  if (!IS_IRAP(s))
2906  ff_hevc_bump_frame(s);
2907 
2908  av_frame_unref(s->output_frame);
2909  ret = ff_hevc_output_frame(s, s->output_frame, 0);
2910  if (ret < 0)
2911  goto fail;
2912 
2913  if (!s->avctx->hwaccel)
2914  ff_thread_finish_setup(s->avctx);
2915 
2916  return 0;
2917 
2918 fail:
2919  if (s->ref)
2920  ff_hevc_unref_frame(s->ref, ~0);
2921  s->ref = s->collocated_ref = NULL;
2922  return ret;
2923 }
2924 
2925 static int hevc_frame_end(HEVCContext *s)
2926 {
2927  HEVCFrame *out = s->ref;
2928  const AVFrameSideData *sd;
2929  av_unused int ret;
2930 
2931  if (out->needs_fg) {
2932  sd = av_frame_get_side_data(out->frame, AV_FRAME_DATA_FILM_GRAIN_PARAMS);
2933  av_assert0(out->frame_grain->buf[0] && sd);
2934  ret = ff_h274_apply_film_grain(out->frame_grain, out->frame, &s->h274db,
2935  (AVFilmGrainParams *) sd->data);
2936  av_assert1(ret >= 0);
2937  }
2938 
2939  return 0;
2940 }
2941 
2942 static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
2943 {
2944  HEVCLocalContext *lc = s->HEVClc;
2945  GetBitContext *gb = &lc->gb;
2946  int ctb_addr_ts, ret;
2947 
2948  *gb = nal->gb;
2949  s->nal_unit_type = nal->type;
2950  s->temporal_id = nal->temporal_id;
2951 
2952  switch (s->nal_unit_type) {
2953  case HEVC_NAL_VPS:
2954  if (FF_HW_HAS_CB(s->avctx, decode_params)) {
2955  ret = FF_HW_CALL(s->avctx, decode_params,
2956  nal->type, nal->raw_data, nal->raw_size);
2957  if (ret < 0)
2958  goto fail;
2959  }
2960  ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps);
2961  if (ret < 0)
2962  goto fail;
2963  break;
2964  case HEVC_NAL_SPS:
2965  if (FF_HW_HAS_CB(s->avctx, decode_params)) {
2966  ret = FF_HW_CALL(s->avctx, decode_params,
2967  nal->type, nal->raw_data, nal->raw_size);
2968  if (ret < 0)
2969  goto fail;
2970  }
2971  ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps,
2972  s->apply_defdispwin);
2973  if (ret < 0)
2974  goto fail;
2975  break;
2976  case HEVC_NAL_PPS:
2977  if (FF_HW_HAS_CB(s->avctx, decode_params)) {
2978  ret = FF_HW_CALL(s->avctx, decode_params,
2979  nal->type, nal->raw_data, nal->raw_size);
2980  if (ret < 0)
2981  goto fail;
2982  }
2983  ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps);
2984  if (ret < 0)
2985  goto fail;
2986  break;
2987  case HEVC_NAL_SEI_PREFIX:
2988  case HEVC_NAL_SEI_SUFFIX:
2989  if (FF_HW_HAS_CB(s->avctx, decode_params)) {
2990  ret = FF_HW_CALL(s->avctx, decode_params,
2991  nal->type, nal->raw_data, nal->raw_size);
2992  if (ret < 0)
2993  goto fail;
2994  }
2995  ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type);
2996  if (ret < 0)
2997  goto fail;
2998  break;
2999  case HEVC_NAL_TRAIL_R:
3000  case HEVC_NAL_TRAIL_N:
3001  case HEVC_NAL_TSA_N:
3002  case HEVC_NAL_TSA_R:
3003  case HEVC_NAL_STSA_N:
3004  case HEVC_NAL_STSA_R:
3005  case HEVC_NAL_BLA_W_LP:
3006  case HEVC_NAL_BLA_W_RADL:
3007  case HEVC_NAL_BLA_N_LP:
3008  case HEVC_NAL_IDR_W_RADL:
3009  case HEVC_NAL_IDR_N_LP:
3010  case HEVC_NAL_CRA_NUT:
3011  case HEVC_NAL_RADL_N:
3012  case HEVC_NAL_RADL_R:
3013  case HEVC_NAL_RASL_N:
3014  case HEVC_NAL_RASL_R:
3015  ret = hls_slice_header(s);
3016  if (ret < 0)
3017  return ret;
3018  if (ret == 1) {
3019  ret = AVERROR_INVALIDDATA;
3020  goto fail;
3021  }
3022 
3023 
3024  if (
3025  (s->avctx->skip_frame >= AVDISCARD_BIDIR && s->sh.slice_type == HEVC_SLICE_B) ||
3026  (s->avctx->skip_frame >= AVDISCARD_NONINTRA && s->sh.slice_type != HEVC_SLICE_I) ||
3027  (s->avctx->skip_frame >= AVDISCARD_NONKEY && !IS_IRAP(s))) {
3028  break;
3029  }
3030 
3031  if (s->sh.first_slice_in_pic_flag) {
3032  if (s->max_ra == INT_MAX) {
3033  if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) {
3034  s->max_ra = s->poc;
3035  } else {
3036  if (IS_IDR(s))
3037  s->max_ra = INT_MIN;
3038  }
3039  }
3040 
3041  if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) &&
3042  s->poc <= s->max_ra) {
3043  s->is_decoded = 0;
3044  break;
3045  } else {
3046  if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra)
3047  s->max_ra = INT_MIN;
3048  }
3049 
3050  s->overlap ++;
3051  ret = hevc_frame_start(s);
3052  if (ret < 0)
3053  return ret;
3054  } else if (!s->ref) {
3055  av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n");
3056  goto fail;
3057  }
3058 
3059  if (s->nal_unit_type != s->first_nal_type) {
3060  av_log(s->avctx, AV_LOG_ERROR,
3061  "Non-matching NAL types of the VCL NALUs: %d %d\n",
3062  s->first_nal_type, s->nal_unit_type);
3063  return AVERROR_INVALIDDATA;
3064  }
3065 
3066  if (!s->sh.dependent_slice_segment_flag &&
3067  s->sh.slice_type != HEVC_SLICE_I) {
3068  ret = ff_hevc_slice_rpl(s);
3069  if (ret < 0) {
3070  av_log(s->avctx, AV_LOG_WARNING,
3071  "Error constructing the reference lists for the current slice.\n");
3072  goto fail;
3073  }
3074  }
3075 
3076  if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) {
3077  ret = FF_HW_CALL(s->avctx, start_frame, NULL, 0);
3078  if (ret < 0)
3079  goto fail;
3080  }
3081 
3082  if (s->avctx->hwaccel) {
3083  ret = FF_HW_CALL(s->avctx, decode_slice, nal->raw_data, nal->raw_size);
3084  if (ret < 0)
3085  goto fail;
3086  } else {
3087  if (s->avctx->profile == AV_PROFILE_HEVC_SCC) {
3088  av_log(s->avctx, AV_LOG_ERROR,
3089  "SCC profile is not yet implemented in hevc native decoder.\n");
3090  ret = AVERROR_PATCHWELCOME;
3091  goto fail;
3092  }
3093 
3094  if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0)
3095  ctb_addr_ts = hls_slice_data_wpp(s, nal);
3096  else
3097  ctb_addr_ts = hls_slice_data(s);
3098  if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) {
3099  ret = hevc_frame_end(s);
3100  if (ret < 0)
3101  goto fail;
3102  s->is_decoded = 1;
3103  }
3104 
3105  if (ctb_addr_ts < 0) {
3106  ret = ctb_addr_ts;
3107  goto fail;
3108  }
3109  }
3110  break;
3111  case HEVC_NAL_EOS_NUT:
3112  case HEVC_NAL_EOB_NUT:
3113  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
3114  s->max_ra = INT_MAX;
3115  break;
3116  case HEVC_NAL_AUD:
3117  case HEVC_NAL_FD_NUT:
3118  case HEVC_NAL_UNSPEC62:
3119  break;
3120  default:
3121  av_log(s->avctx, AV_LOG_INFO,
3122  "Skipping NAL unit %d\n", s->nal_unit_type);
3123  }
3124 
3125  return 0;
3126 fail:
3127  if (s->avctx->err_recognition & AV_EF_EXPLODE)
3128  return ret;
3129  return 0;
3130 }
3131 
3132 static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
3133 {
3134  int i, ret = 0;
3135  int eos_at_start = 1;
3136 
3137  s->ref = s->collocated_ref = NULL;
3138  s->last_eos = s->eos;
3139  s->eos = 0;
3140  s->overlap = 0;
3141 
3142  /* split the input packet into NAL units, so we know the upper bound on the
3143  * number of slices in the frame */
3144  ret = ff_h2645_packet_split(&s->pkt, buf, length, s->avctx, s->is_nalff,
3145  s->nal_length_size, s->avctx->codec_id, 1, 0);
3146  if (ret < 0) {
3147  av_log(s->avctx, AV_LOG_ERROR,
3148  "Error splitting the input into NAL units.\n");
3149  return ret;
3150  }
3151 
3152  for (i = 0; i < s->pkt.nb_nals; i++) {
3153  if (s->pkt.nals[i].type == HEVC_NAL_EOB_NUT ||
3154  s->pkt.nals[i].type == HEVC_NAL_EOS_NUT) {
3155  if (eos_at_start) {
3156  s->last_eos = 1;
3157  } else {
3158  s->eos = 1;
3159  }
3160  } else {
3161  eos_at_start = 0;
3162  }
3163  }
3164 
3165  /*
3166  * Check for RPU delimiter.
3167  *
3168  * Dolby Vision RPUs masquerade as unregistered NALs of type 62.
3169  *
3170  * We have to do this check here and create the rpu buffer, since RPUs are appended
3171  * to the end of an AU; they are the last non-EOB/EOS NAL in the AU.
3172  */
3173  if (s->pkt.nb_nals > 1 && s->pkt.nals[s->pkt.nb_nals - 1].type == HEVC_NAL_UNSPEC62 &&
3174  s->pkt.nals[s->pkt.nb_nals - 1].size > 2 && !s->pkt.nals[s->pkt.nb_nals - 1].nuh_layer_id
3175  && !s->pkt.nals[s->pkt.nb_nals - 1].temporal_id) {
3176  H2645NAL *nal = &s->pkt.nals[s->pkt.nb_nals - 1];
3177  if (s->rpu_buf) {
3178  av_buffer_unref(&s->rpu_buf);
3179  av_log(s->avctx, AV_LOG_WARNING, "Multiple Dolby Vision RPUs found in one AU. Skipping previous.\n");
3180  }
3181 
3182  s->rpu_buf = av_buffer_alloc(nal->raw_size - 2);
3183  if (!s->rpu_buf)
3184  return AVERROR(ENOMEM);
3185  memcpy(s->rpu_buf->data, nal->raw_data + 2, nal->raw_size - 2);
3186 
3187  ret = ff_dovi_rpu_parse(&s->dovi_ctx, nal->data + 2, nal->size - 2);
3188  if (ret < 0) {
3189  av_buffer_unref(&s->rpu_buf);
3190  av_log(s->avctx, AV_LOG_WARNING, "Error parsing DOVI NAL unit.\n");
3191  /* ignore */
3192  }
3193  }
3194 
3195  /* decode the NAL units */
3196  for (i = 0; i < s->pkt.nb_nals; i++) {
3197  H2645NAL *nal = &s->pkt.nals[i];
3198 
3199  if (s->avctx->skip_frame >= AVDISCARD_ALL ||
3200  (s->avctx->skip_frame >= AVDISCARD_NONREF
3201  && ff_hevc_nal_is_nonref(nal->type)) || nal->nuh_layer_id > 0)
3202  continue;
3203 
3204  ret = decode_nal_unit(s, nal);
3205  if (ret >= 0 && s->overlap > 2)
3206  ret = AVERROR_INVALIDDATA;
3207  if (ret < 0) {
3208  av_log(s->avctx, AV_LOG_WARNING,
3209  "Error parsing NAL unit #%d.\n", i);
3210  goto fail;
3211  }
3212  }
3213 
3214 fail:
3215  if (s->ref && s->threads_type == FF_THREAD_FRAME)
3216  ff_thread_report_progress(&s->ref->tf, INT_MAX, 0);
3217 
3218  return ret;
3219 }
3220 
3221 static int verify_md5(HEVCContext *s, AVFrame *frame)
3222 {
3223  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
3224  char msg_buf[4 * (50 + 2 * 2 * 16 /* MD5-size */)];
3225  int pixel_shift;
3226  int err = 0;
3227  int i, j;
3228 
3229  if (!desc)
3230  return AVERROR(EINVAL);
3231 
3232  pixel_shift = desc->comp[0].depth > 8;
3233 
3234  /* the checksums are LE, so we have to byteswap for >8bpp formats
3235  * on BE arches */
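   /* a single buffer sized for the widest plane linesize is enough to
    * hold any byteswapped row below */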
3236 #if HAVE_BIGENDIAN
3237  if (pixel_shift && !s->checksum_buf) {
3238  av_fast_malloc(&s->checksum_buf, &s->checksum_buf_size,
3239  FFMAX3(frame->linesize[0], frame->linesize[1],
3240  frame->linesize[2]));
3241  if (!s->checksum_buf)
3242  return AVERROR(ENOMEM);
3243  }
3244 #endif
3245 
3246  msg_buf[0] = '\0';
3247  for (i = 0; frame->data[i]; i++) {
3248  int width = s->avctx->coded_width;
3249  int height = s->avctx->coded_height;
3250  int w = (i == 1 || i == 2) ? (width >> desc->log2_chroma_w) : width;
3251  int h = (i == 1 || i == 2) ? (height >> desc->log2_chroma_h) : height;
3252  uint8_t md5[16];
3253 
3254  av_md5_init(s->md5_ctx);
3255  for (j = 0; j < h; j++) {
3256  const uint8_t *src = frame->data[i] + j * frame->linesize[i];
3257 #if HAVE_BIGENDIAN
3258  if (pixel_shift) {
3259  s->bdsp.bswap16_buf((uint16_t *) s->checksum_buf,
3260  (const uint16_t *) src, w);
3261  src = s->checksum_buf;
3262  }
3263 #endif
3264  av_md5_update(s->md5_ctx, src, w << pixel_shift);
3265  }
3266  av_md5_final(s->md5_ctx, md5);
3267 
3268 #define MD5_PRI "%016" PRIx64 "%016" PRIx64
3269 #define MD5_PRI_ARG(buf) AV_RB64(buf), AV_RB64((const uint8_t*)(buf) + 8)
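    /* print the 16-byte digest as two big-endian 64-bit halves so the hex
     * string reads in the same byte order as the hash itself */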
3270 
3271  if (!memcmp(md5, s->sei.picture_hash.md5[i], 16)) {
3272  av_strlcatf(msg_buf, sizeof(msg_buf),
3273  "plane %d - correct " MD5_PRI "; ",
3274  i, MD5_PRI_ARG(md5));
3275  } else {
3276  av_strlcatf(msg_buf, sizeof(msg_buf),
3277  "mismatching checksum of plane %d - " MD5_PRI " != " MD5_PRI "; ",
3278  i, MD5_PRI_ARG(md5), MD5_PRI_ARG(s->sei.picture_hash.md5[i]));
3279  err = AVERROR_INVALIDDATA;
3280  }
3281  }
3282 
3283  av_log(s->avctx, err < 0 ? AV_LOG_ERROR : AV_LOG_DEBUG,
3284  "Verifying checksum for frame with POC %d: %s\n",
3285  s->poc, msg_buf);
3286 
3287  return err;
3288 }
3289 
3290 static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length, int first)
3291 {
3292  int ret, i;
3293 
3294  ret = ff_hevc_decode_extradata(buf, length, &s->ps, &s->sei, &s->is_nalff,
3295  &s->nal_length_size, s->avctx->err_recognition,
3296  s->apply_defdispwin, s->avctx);
3297  if (ret < 0)
3298  return ret;
3299 
3300  /* export stream parameters from the first SPS */
3301  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3302  if (first && s->ps.sps_list[i]) {
3303  const HEVCSPS *sps = s->ps.sps_list[i];
3304  export_stream_params(s, sps);
3305  break;
3306  }
3307  }
3308 
3309  /* export stream parameters from SEI */
3310  ret = export_stream_params_from_sei(s);
3311  if (ret < 0)
3312  return ret;
3313 
3314  return 0;
3315 }
3316 
3317 static int hevc_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
3318  int *got_output, AVPacket *avpkt)
3319 {
3320  int ret;
3321  uint8_t *sd;
3322  size_t sd_size;
3323  HEVCContext *s = avctx->priv_data;
3324 
3325  if (!avpkt->size) {
3326  ret = ff_hevc_output_frame(s, rframe, 1);
3327  if (ret < 0)
3328  return ret;
3329 
3330  *got_output = ret;
3331  return 0;
3332  }
3333 
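    /* new extradata attached to the packet carries updated parameter sets;
     * it is parsed the same way as the initial extradata */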
3334  sd = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &sd_size);
3335  if (sd && sd_size > 0) {
3336  ret = hevc_decode_extradata(s, sd, sd_size, 0);
3337  if (ret < 0)
3338  return ret;
3339  }
3340 
3341  sd = av_packet_get_side_data(avpkt, AV_PKT_DATA_DOVI_CONF, &sd_size);
3342  if (sd && sd_size > 0) {
3343  int old = s->dovi_ctx.dv_profile;
3344 
3345  ff_dovi_update_cfg(&s->dovi_ctx, (AVDOVIDecoderConfigurationRecord *) sd);
3346  if (old)
3347  av_log(avctx, AV_LOG_DEBUG,
3348  "New DOVI configuration record from input packet (profile %d -> %u).\n",
3349  old, s->dovi_ctx.dv_profile);
3350  }
3351 
3352  s->ref = s->collocated_ref = NULL;
3353  ret = decode_nal_units(s, avpkt->data, avpkt->size);
3354  if (ret < 0)
3355  return ret;
3356 
3357  if (avctx->hwaccel) {
3358  if (s->ref && (ret = FF_HW_SIMPLE_CALL(avctx, end_frame)) < 0) {
3359  av_log(avctx, AV_LOG_ERROR,
3360  "hardware accelerator failed to decode picture\n");
3361  ff_hevc_unref_frame(s->ref, ~0);
3362  return ret;
3363  }
3364  } else {
3365  /* verify the SEI checksum */
3366  if (avctx->err_recognition & AV_EF_CRCCHECK && s->ref && s->is_decoded &&
3367  s->sei.picture_hash.is_md5) {
3368  ret = verify_md5(s, s->ref->frame);
3369  if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) {
3370  ff_hevc_unref_frame(s->ref, ~0);
3371  return ret;
3372  }
3373  }
3374  }
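    /* the decoded-picture-hash SEI applies only to the access unit it
     * arrived with, so drop it once this frame has been checked */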
3375  s->sei.picture_hash.is_md5 = 0;
3376 
3377  if (s->is_decoded) {
3378  av_log(avctx, AV_LOG_DEBUG, "Decoded frame with POC %d.\n", s->poc);
3379  s->is_decoded = 0;
3380  }
3381 
3382  if (s->output_frame->buf[0]) {
3383  av_frame_move_ref(rframe, s->output_frame);
3384  *got_output = 1;
3385  }
3386 
3387  return avpkt->size;
3388 }
3389 
3390 static int hevc_ref_frame(HEVCFrame *dst, HEVCFrame *src)
3391 {
3392  int ret;
3393 
3394  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
3395  if (ret < 0)
3396  return ret;
3397 
3398  if (src->needs_fg) {
3399  ret = av_frame_ref(dst->frame_grain, src->frame_grain);
3400  if (ret < 0)
3401  return ret;
3402  dst->needs_fg = 1;
3403  }
3404 
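    /* tab_mvf, rpl_tab and rpl are RefStruct references; taking a frame
     * reference only bumps their refcounts, nothing is copied */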
3405  dst->tab_mvf = ff_refstruct_ref(src->tab_mvf);
3406  dst->rpl_tab = ff_refstruct_ref(src->rpl_tab);
3407  dst->rpl = ff_refstruct_ref(src->rpl);
3408  dst->nb_rpl_elems = src->nb_rpl_elems;
3409 
3410  dst->poc = src->poc;
3411  dst->ctb_count = src->ctb_count;
3412  dst->flags = src->flags;
3413  dst->sequence = src->sequence;
3414 
3415  ff_refstruct_replace(&dst->hwaccel_picture_private,
3416  src->hwaccel_picture_private);
3417 
3418  return 0;
3419 }
3420 
3421 static av_cold int hevc_decode_free(AVCodecContext *avctx)
3422 {
3423  HEVCContext *s = avctx->priv_data;
3424  int i;
3425 
3426  pic_arrays_free(s);
3427 
3428  ff_dovi_ctx_unref(&s->dovi_ctx);
3429  av_buffer_unref(&s->rpu_buf);
3430 
3431  av_freep(&s->md5_ctx);
3432 
3433  for (i = 0; i < 3; i++) {
3434  av_freep(&s->sao_pixel_buffer_h[i]);
3435  av_freep(&s->sao_pixel_buffer_v[i]);
3436  }
3437  av_frame_free(&s->output_frame);
3438 
3439  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3440  ff_hevc_unref_frame(&s->DPB[i], ~0);
3441  av_frame_free(&s->DPB[i].frame);
3442  av_frame_free(&s->DPB[i].frame_grain);
3443  }
3444 
3445  ff_hevc_ps_uninit(&s->ps);
3446 
3447  av_freep(&s->sh.entry_point_offset);
3448  av_freep(&s->sh.offset);
3449  av_freep(&s->sh.size);
3450 
3451  if (s->HEVClcList) {
3452  for (i = 1; i < s->threads_number; i++) {
3453  av_freep(&s->HEVClcList[i]);
3454  }
3455  }
3456  av_freep(&s->HEVClc);
3457  av_freep(&s->HEVClcList);
3458 
3459  ff_h2645_packet_uninit(&s->pkt);
3460 
3461  ff_hevc_reset_sei(&s->sei);
3462 
3463  return 0;
3464 }
3465 
3466 static av_cold int hevc_init_context(AVCodecContext *avctx)
3467 {
3468  HEVCContext *s = avctx->priv_data;
3469  int i;
3470 
3471  s->avctx = avctx;
3472 
3473  s->HEVClc = av_mallocz(sizeof(HEVCLocalContext));
3474  s->HEVClcList = av_mallocz(sizeof(HEVCLocalContext*) * s->threads_number);
3475  if (!s->HEVClc || !s->HEVClcList)
3476  return AVERROR(ENOMEM);
3477  s->HEVClc->parent = s;
3478  s->HEVClc->logctx = avctx;
3479  s->HEVClc->common_cabac_state = &s->cabac;
3480  s->HEVClcList[0] = s->HEVClc;
3481 
3482  s->output_frame = av_frame_alloc();
3483  if (!s->output_frame)
3484  return AVERROR(ENOMEM);
3485 
3486  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3487  s->DPB[i].frame = av_frame_alloc();
3488  if (!s->DPB[i].frame)
3489  return AVERROR(ENOMEM);
3490  s->DPB[i].tf.f = s->DPB[i].frame;
3491 
3492  s->DPB[i].frame_grain = av_frame_alloc();
3493  if (!s->DPB[i].frame_grain)
3494  return AVERROR(ENOMEM);
3495  }
3496 
3497  s->max_ra = INT_MAX;
3498 
3499  s->md5_ctx = av_md5_alloc();
3500  if (!s->md5_ctx)
3501  return AVERROR(ENOMEM);
3502 
3503  ff_bswapdsp_init(&s->bdsp);
3504 
3505  s->dovi_ctx.logctx = avctx;
3506  s->eos = 0;
3507 
3508  ff_hevc_reset_sei(&s->sei);
3509 
3510  return 0;
3511 }
3512 
3513 #if HAVE_THREADS
3514 static int hevc_update_thread_context(AVCodecContext *dst,
3515  const AVCodecContext *src)
3516 {
3517  HEVCContext *s = dst->priv_data;
3518  HEVCContext *s0 = src->priv_data;
3519  int i, ret;
3520 
3521  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3522  ff_hevc_unref_frame(&s->DPB[i], ~0);
3523  if (s0->DPB[i].frame->buf[0]) {
3524  ret = hevc_ref_frame(&s->DPB[i], &s0->DPB[i]);
3525  if (ret < 0)
3526  return ret;
3527  }
3528  }
3529 
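    /* if the active SPS changed in the source thread, invalidate the local
     * pointer so set_sps() below reinitializes this context from the new one */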
3530  if (s->ps.sps != s0->ps.sps)
3531  s->ps.sps = NULL;
3532  for (int i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++)
3533  ff_refstruct_replace(&s->ps.vps_list[i], s0->ps.vps_list[i]);
3534 
3535  for (int i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++)
3536  ff_refstruct_replace(&s->ps.sps_list[i], s0->ps.sps_list[i]);
3537 
3538  for (int i = 0; i < FF_ARRAY_ELEMS(s->ps.pps_list); i++)
3539  ff_refstruct_replace(&s->ps.pps_list[i], s0->ps.pps_list[i]);
3540 
3541  if (s->ps.sps != s0->ps.sps)
3542  if ((ret = set_sps(s, s0->ps.sps, src->pix_fmt)) < 0)
3543  return ret;
3544 
3545  s->seq_decode = s0->seq_decode;
3546  s->seq_output = s0->seq_output;
3547  s->pocTid0 = s0->pocTid0;
3548  s->max_ra = s0->max_ra;
3549  s->eos = s0->eos;
3550  s->no_rasl_output_flag = s0->no_rasl_output_flag;
3551 
3552  s->is_nalff = s0->is_nalff;
3553  s->nal_length_size = s0->nal_length_size;
3554 
3555  s->threads_number = s0->threads_number;
3556  s->threads_type = s0->threads_type;
3557 
3558  s->film_grain_warning_shown = s0->film_grain_warning_shown;
3559 
3560  if (s0->eos) {
3561  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
3562  s->max_ra = INT_MAX;
3563  }
3564 
3565  ret = ff_h2645_sei_ctx_replace(&s->sei.common, &s0->sei.common);
3566  if (ret < 0)
3567  return ret;
3568 
3569  ret = av_buffer_replace(&s->sei.common.dynamic_hdr_plus.info,
3570  s0->sei.common.dynamic_hdr_plus.info);
3571  if (ret < 0)
3572  return ret;
3573 
3574  ret = av_buffer_replace(&s->rpu_buf, s0->rpu_buf);
3575  if (ret < 0)
3576  return ret;
3577 
3578  ff_dovi_ctx_replace(&s->dovi_ctx, &s0->dovi_ctx);
3579 
3580  ret = av_buffer_replace(&s->sei.common.dynamic_hdr_vivid.info,
3581  s0->sei.common.dynamic_hdr_vivid.info);
3582  if (ret < 0)
3583  return ret;
3584 
3585  s->sei.common.frame_packing = s0->sei.common.frame_packing;
3586  s->sei.common.display_orientation = s0->sei.common.display_orientation;
3587  s->sei.common.alternative_transfer = s0->sei.common.alternative_transfer;
3588  s->sei.common.mastering_display = s0->sei.common.mastering_display;
3589  s->sei.common.content_light = s0->sei.common.content_light;
3590 
3591  ret = export_stream_params_from_sei(s);
3592  if (ret < 0)
3593  return ret;
3594 
3595  return 0;
3596 }
3597 #endif
3598 
3599 static av_cold int hevc_decode_init(AVCodecContext *avctx)
3600 {
3601  HEVCContext *s = avctx->priv_data;
3602  int ret;
3603 
3604  if (avctx->active_thread_type & FF_THREAD_SLICE) {
3605  s->threads_number = avctx->thread_count;
3606  ret = ff_slice_thread_init_progress(avctx);
3607  if (ret < 0)
3608  return ret;
3609  } else
3610  s->threads_number = 1;
3611 
3612  if((avctx->active_thread_type & FF_THREAD_FRAME) && avctx->thread_count > 1)
3613  s->threads_type = FF_THREAD_FRAME;
3614  else
3615  s->threads_type = FF_THREAD_SLICE;
3616 
3617  ret = hevc_init_context(avctx);
3618  if (ret < 0)
3619  return ret;
3620 
3621  s->enable_parallel_tiles = 0;
3622  s->sei.picture_timing.picture_struct = 0;
3623  s->eos = 1;
3624 
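    /* wpp_err is shared by all WPP row threads; the first row to hit an
     * error sets it so the remaining rows can stop early */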
3625  atomic_init(&s->wpp_err, 0);
3626 
3627  if (!avctx->internal->is_copy) {
3628  const AVPacketSideData *sd;
3629 
3630  if (avctx->extradata_size > 0 && avctx->extradata) {
3631  ret = hevc_decode_extradata(s, avctx->extradata, avctx->extradata_size, 1);
3632  if (ret < 0) {
3633  return ret;
3634  }
3635  }
3636 
3637  sd = ff_get_coded_side_data(avctx, AV_PKT_DATA_DOVI_CONF);
3638  if (sd && sd->size > 0)
3639  ff_dovi_update_cfg(&s->dovi_ctx, (AVDOVIDecoderConfigurationRecord *) sd->data);
3640  }
3641 
3642  return 0;
3643 }
3644 
3645 static void hevc_decode_flush(AVCodecContext *avctx)
3646 {
3647  HEVCContext *s = avctx->priv_data;
3648  ff_hevc_flush_dpb(s);
3649  ff_hevc_reset_sei(&s->sei);
3650  ff_dovi_ctx_flush(&s->dovi_ctx);
3651  av_buffer_unref(&s->rpu_buf);
3652  s->max_ra = INT_MAX;
3653  s->eos = 1;
3654 
3655  if (FF_HW_HAS_CB(avctx, flush))
3656  FF_HW_SIMPLE_CALL(avctx, flush);
3657 }
3658 
3659 #define OFFSET(x) offsetof(HEVCContext, x)
3660 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
3661 
3662 static const AVOption options[] = {
3663  { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
3664  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3665  { "strict-displaywin", "strictly apply default display window size", OFFSET(apply_defdispwin),
3666  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3667  { NULL },
3668 };
3669 
3670 static const AVClass hevc_decoder_class = {
3671  .class_name = "HEVC decoder",
3672  .item_name = av_default_item_name,
3673  .option = options,
3674  .version = LIBAVUTIL_VERSION_INT,
3675 };
3676 
3677 const FFCodec ff_hevc_decoder = {
3678  .p.name = "hevc",
3679  CODEC_LONG_NAME("HEVC (High Efficiency Video Coding)"),
3680  .p.type = AVMEDIA_TYPE_VIDEO,
3681  .p.id = AV_CODEC_ID_HEVC,
3682  .priv_data_size = sizeof(HEVCContext),
3683  .p.priv_class = &hevc_decoder_class,
3684  .init = hevc_decode_init,
3685  .close = hevc_decode_free,
3686  FF_CODEC_DECODE_CB(hevc_decode_frame),
3687  .flush = hevc_decode_flush,
3688  UPDATE_THREAD_CONTEXT(hevc_update_thread_context),
3689  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
3690  AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
3691  .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING |
3692  FF_CODEC_CAP_ALLOCATE_PROGRESS | FF_CODEC_CAP_INIT_CLEANUP,
3693  .p.profiles = NULL_IF_CONFIG_SMALL(ff_hevc_profiles),
3694  .hw_configs = (const AVCodecHWConfigInternal *const []) {
3695 #if CONFIG_HEVC_DXVA2_HWACCEL
3696  HWACCEL_DXVA2(hevc),
3697 #endif
3698 #if CONFIG_HEVC_D3D11VA_HWACCEL
3699  HWACCEL_D3D11VA(hevc),
3700 #endif
3701 #if CONFIG_HEVC_D3D11VA2_HWACCEL
3702  HWACCEL_D3D11VA2(hevc),
3703 #endif
3704 #if CONFIG_HEVC_NVDEC_HWACCEL
3705  HWACCEL_NVDEC(hevc),
3706 #endif
3707 #if CONFIG_HEVC_VAAPI_HWACCEL
3708  HWACCEL_VAAPI(hevc),
3709 #endif
3710 #if CONFIG_HEVC_VDPAU_HWACCEL
3711  HWACCEL_VDPAU(hevc),
3712 #endif
3713 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
3714  HWACCEL_VIDEOTOOLBOX(hevc),
3715 #endif
3716 #if CONFIG_HEVC_VULKAN_HWACCEL
3717  HWACCEL_VULKAN(hevc),
3718 #endif
3719  NULL
3720  },
3721 };
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:31
ff_hevc_sao_offset_sign_decode
int ff_hevc_sao_offset_sign_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:606
ff_get_coded_side_data
const AVPacketSideData * ff_get_coded_side_data(const AVCodecContext *avctx, enum AVPacketSideDataType type)
Get side data of the given type from a decoding context.
Definition: decode.c:1402
verify_md5
static int verify_md5(HEVCContext *s, AVFrame *frame)
Definition: hevcdec.c:3221
hwconfig.h
MD5_PRI
#define MD5_PRI
HEVC_NAL_RADL_N
@ HEVC_NAL_RADL_N
Definition: hevc.h:35
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1435
SliceHeader::beta_offset
int beta_offset
beta_offset_div2 * 2
Definition: hevcdec.h:304
bswapdsp.h
L1
F H1 F F H1 F F F F H1<-F-------F-------F v v v H2 H3 H2 ^ ^ ^ F-------F-------F-> H1<-F-------F-------F|||||||||F H1 F|||||||||F H1 Funavailable fullpel samples(outside the picture for example) shall be equalto the closest available fullpel sampleSmaller pel interpolation:--------------------------if diag_mc is set then points which lie on a line between 2 vertically, horizontally or diagonally adjacent halfpel points shall be interpolatedlinearly with rounding to nearest and halfway values rounded up.points which lie on 2 diagonals at the same time should only use the onediagonal not containing the fullpel point F--> O q O<--h1-> O q O<--F v \/v \/v O O O O O O O|/|\|q q q q q|/|\|O O O O O O O ^/\ ^/\ ^ h2--> O q O<--h3-> O q O<--h2 v \/v \/v O O O O O O O|\|/|q q q q q|\|/|O O O O O O O ^/\ ^/\ ^ F--> O q O<--h1-> O q O<--Fthe remaining points shall be bilinearly interpolated from theup to 4 surrounding halfpel and fullpel points, again rounding should be tonearest and halfway values rounded upcompliant Snow decoders MUST support 1-1/8 pel luma and 1/2-1/16 pel chromainterpolation at leastOverlapped block motion compensation:-------------------------------------FIXMELL band prediction:===================Each sample in the LL0 subband is predicted by the median of the left, top andleft+top-topleft samples, samples outside the subband shall be considered tobe 0. To reverse this prediction in the decoder apply the following.for(y=0;y< height;y++){ for(x=0;x< width;x++){ sample[y][x]+=median(sample[y-1][x], sample[y][x-1], sample[y-1][x]+sample[y][x-1]-sample[y-1][x-1]);}}sample[-1][ *]=sample[ *][-1]=0;width, height here are the width and height of the LL0 subband not of the finalvideoDequantization:===============FIXMEWavelet Transform:==================Snow supports 2 wavelet transforms, the symmetric biorthogonal 5/3 integertransform and an integer approximation of the symmetric biorthogonal 9/7daubechies wavelet.2D IDWT(inverse discrete wavelet transform) --------------------------------------------The 2D IDWT applies a 2D filter recursively, each time combining the4 lowest frequency subbands into a single subband until only 1 subbandremains.The 2D filter is done by first applying a 1D filter in the vertical directionand then applying it in the horizontal one. --------------- --------------- --------------- ---------------|LL0|HL0|||||||||||||---+---|HL1||L0|H0|HL1||LL1|HL1|||||LH0|HH0|||||||||||||-------+-------|-> L1 H1 LH1 HH1 LH1 HH1 LH1 HH1 L1
Definition: snow.txt:554
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
HEVCLocalContext
Definition: hevcdec.h:434
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:253
HEVCFrame::flags
uint8_t flags
A combination of HEVC_FRAME_FLAG_*.
Definition: hevcdec.h:431
SliceHeader::slice_act_cr_qp_offset
int slice_act_cr_qp_offset
Definition: hevcdec.h:300
HWACCEL_MAX
#define HWACCEL_MAX
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
HEVCFrame::tf
ThreadFrame tf
Definition: hevcdec.h:409
ff_hevc_hls_residual_coding
void ff_hevc_hls_residual_coding(HEVCLocalContext *lc, int x0, int y0, int log2_trafo_size, enum ScanType scan_idx, int c_idx)
Definition: hevc_cabac.c:1039
ff_hevc_skip_flag_decode
int ff_hevc_skip_flag_decode(HEVCLocalContext *lc, int x0, int y0, int x_cb, int y_cb)
Definition: hevc_cabac.c:628
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
ff_hevc_cu_chroma_qp_offset_idx
int ff_hevc_cu_chroma_qp_offset_idx(HEVCLocalContext *lc)
Definition: hevc_cabac.c:681
av_clip
#define av_clip
Definition: common.h:96
atomic_store
#define atomic_store(object, desired)
Definition: stdatomic.h:85
ff_hevc_pcm_flag_decode
int ff_hevc_pcm_flag_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:755
set_deblocking_bypass
static void set_deblocking_bypass(const HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:1346
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
ff_refstruct_ref
void * ff_refstruct_ref(void *obj)
Create a new reference to an object managed via this API, i.e.
Definition: refstruct.c:140
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:694
ff_hevc_pred_init
void ff_hevc_pred_init(HEVCPredContext *hpc, int bit_depth)
Definition: hevcpred.c:43
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
decode_slice
static int decode_slice(AVCodecContext *c, void *arg)
Definition: ffv1dec.c:255
opt.h
ff_dovi_ctx_unref
void ff_dovi_ctx_unref(DOVIContext *s)
Completely reset a DOVIContext, preserving only logctx.
Definition: dovi_rpu.c:44
chroma_mc_uni
static void chroma_mc_uni(HEVCLocalContext *lc, uint8_t *dst0, ptrdiff_t dststride, const uint8_t *src0, ptrdiff_t srcstride, int reflist, int x_off, int y_off, int block_w, int block_h, const struct MvField *current_mv, int chroma_weight, int chroma_offset)
8.5.3.2.2.2 Chroma sample uniprediction interpolation process
Definition: hevcdec.c:1695
hevc_decode_flush
static void hevc_decode_flush(AVCodecContext *avctx)
Definition: hevcdec.c:3645
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1029
PART_NxN
@ PART_NxN
Definition: hevcdec.h:145
decode_nal_unit
static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
Definition: hevcdec.c:2942
SliceHeader::slice_act_y_qp_offset
int slice_act_y_qp_offset
Definition: hevcdec.h:298
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1264
ff_refstruct_pool_alloc
FFRefStructPool * ff_refstruct_pool_alloc(size_t size, unsigned flags)
Equivalent to ff_refstruct_pool_alloc(size, flags, NULL, NULL, NULL, NULL, NULL)
Definition: refstruct.c:335
out
FILE * out
Definition: movenc.c:54
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:824
HEVCWindow::bottom_offset
unsigned int bottom_offset
Definition: hevc_ps.h:90
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:812
SAO_BAND
@ SAO_BAND
Definition: hevcdec.h:211
ff_hevc_profiles
const AVProfile ff_hevc_profiles[]
Definition: profiles.c:95
ff_hevc_pred_mode_decode
int ff_hevc_pred_mode_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:692
AV_PKT_DATA_NEW_EXTRADATA
@ AV_PKT_DATA_NEW_EXTRADATA
The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format that the extradata buffer was...
Definition: packet.h:56
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2964
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:120
ff_h2645_sei_to_frame
int ff_h2645_sei_to_frame(AVFrame *frame, H2645SEI *sei, enum AVCodecID codec_id, AVCodecContext *avctx, const H2645VUI *vui, unsigned bit_depth_luma, unsigned bit_depth_chroma, int seed)
Definition: h2645_sei.c:511
src1
const pixel * src1
Definition: h264pred_template.c:421
set_ct_depth
static av_always_inline void set_ct_depth(const HEVCContext *s, int x0, int y0, int log2_cb_size, int ct_depth)
Definition: hevcdec.c:2115
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1412
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:421
HEVCLocalContext::ctb_up_flag
uint8_t ctb_up_flag
Definition: hevcdec.h:467
HEVCFrame::needs_fg
int needs_fg
Definition: hevcdec.h:410
mv
static const int8_t mv[256][2]
Definition: 4xm.c:80
SliceHeader::num_entry_point_offsets
int num_entry_point_offsets
Definition: hevcdec.h:313
HEVC_NAL_STSA_N
@ HEVC_NAL_STSA_N
Definition: hevc.h:33
HEVCFrame::frame_grain
AVFrame * frame_grain
Definition: hevcdec.h:408
AV_FRAME_DATA_FILM_GRAIN_PARAMS
@ AV_FRAME_DATA_FILM_GRAIN_PARAMS
Film grain parameters for a frame, described by AVFilmGrainParams.
Definition: frame.h:184
PART_2NxnU
@ PART_2NxnU
Definition: hevcdec.h:146
av_unused
#define av_unused
Definition: attributes.h:131
ff_hevc_luma_mv_mvp_mode
void ff_hevc_luma_mv_mvp_mode(HEVCLocalContext *lc, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv, int mvp_lx_flag, int LX)
Definition: hevc_mvs.c:583
AV_FRAME_DATA_S12M_TIMECODE
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:152
av_mod_uintp2
#define av_mod_uintp2
Definition: common.h:123
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:100
luma_intra_pred_mode
static int luma_intra_pred_mode(HEVCLocalContext *lc, int x0, int y0, int pu_size, int prev_intra_luma_pred_flag)
8.4.1
Definition: hevcdec.c:2035
H2645NAL::nuh_layer_id
int nuh_layer_id
Definition: h2645_parse.h:67
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
pixdesc.h
HEVCFrame::tab_mvf
MvField * tab_mvf
RefStruct reference.
Definition: hevcdec.h:411
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:1022
TransformUnit::cu_qp_delta
int cu_qp_delta
Definition: hevcdec.h:378
AVPacketSideData
This structure stores auxiliary information for decoding, presenting, or otherwise processing the cod...
Definition: packet.h:342
HEVC_NAL_TSA_N
@ HEVC_NAL_TSA_N
Definition: hevc.h:31
ff_hevc_cu_transquant_bypass_flag_decode
int ff_hevc_cu_transquant_bypass_flag_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:623
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:64
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:673
HEVCFrame::hwaccel_picture_private
void * hwaccel_picture_private
RefStruct reference.
Definition: hevcdec.h:420
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:491
PAR
#define PAR
Definition: hevcdec.c:3660
INTRA_DC
@ INTRA_DC
Definition: hevcdec.h:173
AVOption
AVOption.
Definition: opt.h:251
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:66
ff_h2645_packet_uninit
void ff_h2645_packet_uninit(H2645Packet *pkt)
Free all the allocated memory in the packet.
Definition: h2645_parse.c:597
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:573
hls_decode_entry
static int hls_decode_entry(AVCodecContext *avctxt, void *arg)
Definition: hevcdec.c:2519
hevc_decode_free
static av_cold int hevc_decode_free(AVCodecContext *avctx)
Definition: hevcdec.c:3421
data
const char data[16]
Definition: mxf.c:148
Mv::y
int16_t y
vertical component of motion vector
Definition: hevcdec.h:349
AV_FRAME_DATA_DOVI_RPU_BUFFER
@ AV_FRAME_DATA_DOVI_RPU_BUFFER
Dolby Vision RPU raw data, suitable for passing to x265 or other libraries.
Definition: frame.h:197
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:468
SAO_EDGE
@ SAO_EDGE
Definition: hevcdec.h:212
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
SliceHeader::slice_temporal_mvp_enabled_flag
uint8_t slice_temporal_mvp_enabled_flag
Definition: hevcdec.h:280
MvField::mv
Mv mv[2]
Definition: hevcdec.h:353
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:247
TransformUnit::is_cu_qp_delta_coded
uint8_t is_cu_qp_delta_coded
Definition: hevcdec.h:386
FFCodec
Definition: codec_internal.h:127
HEVC_NAL_RASL_N
@ HEVC_NAL_RASL_N
Definition: hevc.h:37
ff_hevc_intra_chroma_pred_mode_decode
int ff_hevc_intra_chroma_pred_mode_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:783
HEVC_NAL_STSA_R
@ HEVC_NAL_STSA_R
Definition: hevc.h:34
MODE_INTRA
@ MODE_INTRA
Definition: hevcdec.h:154
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:174
HEVC_NAL_BLA_W_RADL
@ HEVC_NAL_BLA_W_RADL
Definition: hevc.h:46
SliceHeader::slice_loop_filter_across_slices_enabled_flag
uint8_t slice_loop_filter_across_slices_enabled_flag
Definition: hevcdec.h:289
SAOParams::offset_sign
int offset_sign[3][4]
sao_offset_sign
Definition: hevcdsp.h:36
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
export_stream_params
static void export_stream_params(HEVCContext *s, const HEVCSPS *sps)
Definition: hevcdec.c:321
HEVCLocalContext::ctb_up_left_flag
uint8_t ctb_up_left_flag
Definition: hevcdec.h:469
H2645NAL::temporal_id
int temporal_id
HEVC only, nuh_temporal_id_plus_1 - 1.
Definition: h2645_parse.h:62
av_timecode_get_smpte
uint32_t av_timecode_get_smpte(AVRational rate, int drop, int hh, int mm, int ss, int ff)
Convert sei info to SMPTE 12M binary representation.
Definition: timecode.c:69
RefPicList
Definition: hevcdec.h:241
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:514
thread.h
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
OFFSET
#define OFFSET(x)
Definition: hevcdec.c:3659
PF_INTRA
@ PF_INTRA
Definition: hevcdec.h:165
AV_PIX_FMT_VULKAN
@ AV_PIX_FMT_VULKAN
Vulkan hardware images.
Definition: pixfmt.h:376
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:361
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in FFCodec caps_internal and use ff_thread_get_buffer() to allocate frames. Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
ff_hevc_cu_qp_delta_sign_flag
int ff_hevc_cu_qp_delta_sign_flag(HEVCLocalContext *lc)
Definition: hevc_cabac.c:671
hls_decode_neighbour
static void hls_decode_neighbour(HEVCLocalContext *lc, int x_ctb, int y_ctb, int ctb_addr_ts)
Definition: hevcdec.c:2470
MODE_SKIP
@ MODE_SKIP
Definition: hevcdec.h:155
HEVCLocalContext::end_of_tiles_x
int end_of_tiles_x
Definition: hevcdec.h:470
AV_PKT_DATA_DOVI_CONF
@ AV_PKT_DATA_DOVI_CONF
DOVI configuration ref: dolby-vision-bitstreams-within-the-iso-base-media-file-format-v2....
Definition: packet.h:284
CodingUnit::x
int x
Definition: hevcdec.h:335
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:381
BOUNDARY_LEFT_TILE
#define BOUNDARY_LEFT_TILE
Definition: hevcdec.h:484
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:1803
golomb.h
exp golomb vlc stuff
AVCodecInternal::is_copy
int is_copy
When using frame-threaded decoding, this field is set for the first worker thread (e....
Definition: internal.h:57
AVPacketSideData::size
size_t size
Definition: packet.h:344
PART_2Nx2N
@ PART_2Nx2N
Definition: hevcdec.h:142
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
SET_SAO
#define SET_SAO(elem, value)
Definition: hevcdec.c:1037
HEVCLocalContext::ctb_up_right_flag
uint8_t ctb_up_right_flag
Definition: hevcdec.h:468
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
ff_hevc_clear_refs
void ff_hevc_clear_refs(HEVCContext *s)
Mark all frames in DPB as unused for reference.
Definition: hevc_refs.c:66
PRED_BI
@ PRED_BI
Definition: hevcdec.h:161
ff_hevc_log2_res_scale_abs
int ff_hevc_log2_res_scale_abs(HEVCLocalContext *lc, int idx)
Definition: hevc_cabac.c:909
ff_hevc_hls_mvd_coding
void ff_hevc_hls_mvd_coding(HEVCLocalContext *lc, int x0, int y0, int log2_cb_size)
Definition: hevc_cabac.c:1549
luma_mc_uni
static void luma_mc_uni(HEVCLocalContext *lc, uint8_t *dst, ptrdiff_t dststride, const AVFrame *ref, const Mv *mv, int x_off, int y_off, int block_w, int block_h, int luma_weight, int luma_offset)
8.5.3.2.2.1 Luma sample unidirectional interpolation process
Definition: hevcdec.c:1541
av_ceil_log2
#define av_ceil_log2
Definition: common.h:93
fail
#define fail()
Definition: checkasm.h:142
PredictionUnit::intra_pred_mode_c
uint8_t intra_pred_mode_c[4]
Definition: hevcdec.h:373
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1532
ff_refstruct_pool_uninit
static void ff_refstruct_pool_uninit(FFRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
md5
struct AVMD5 * md5
Definition: movenc.c:56
InterPredIdc
InterPredIdc
Definition: hevcdec.h:158
MODE_INTER
@ MODE_INTER
Definition: hevcdec.h:153
ff_hevc_hls_filter
void ff_hevc_hls_filter(HEVCLocalContext *lc, int x, int y, int ctb_size)
Definition: hevc_filter.c:851
timecode.h
HEVCWindow::left_offset
unsigned int left_offset
Definition: hevc_ps.h:87
GetBitContext
Definition: get_bits.h:108
HEVCLocalContext::pu
PredictionUnit pu
Definition: hevcdec.h:480
ff_hevc_cu_chroma_qp_offset_flag
int ff_hevc_cu_chroma_qp_offset_flag(HEVCLocalContext *lc)
Definition: hevc_cabac.c:676
decode_lt_rps
static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
Definition: hevcdec.c:264
TransformUnit::res_scale_val
int res_scale_val
Definition: hevcdec.h:380
SliceHeader::short_term_ref_pic_set_size
int short_term_ref_pic_set_size
Definition: hevcdec.h:271
hevc_decoder_class
static const AVClass hevc_decoder_class
Definition: hevcdec.c:3670
val
static double val(void *priv, double ch)
Definition: aeval.c:78
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:72
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
ff_hevc_output_frame
int ff_hevc_output_frame(HEVCContext *s, AVFrame *out, int flush)
Find next frame in output order and put a reference to it in frame.
Definition: hevc_refs.c:180
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:636
SliceHeader::long_term_ref_pic_set_size
int long_term_ref_pic_set_size
Definition: hevcdec.h:274
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
CTB
#define CTB(tab, x, y)
Definition: hevcdec.c:1035
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
ff_hevc_decode_nal_sei
int ff_hevc_decode_nal_sei(GetBitContext *gb, void *logctx, HEVCSEI *s, const HEVCParamSets *ps, enum HEVCNALUnitType type)
Definition: hevc_sei.c:227
AVRational::num
int num
Numerator.
Definition: rational.h:59
intra_prediction_unit
static void intra_prediction_unit(HEVCLocalContext *lc, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:2132
refstruct.h
HEVC_NAL_UNSPEC62
@ HEVC_NAL_UNSPEC62
Definition: hevc.h:91
ff_hevc_deblocking_boundary_strengths
void ff_hevc_deblocking_boundary_strengths(HEVCLocalContext *lc, int x0, int y0, int log2_trafo_size)
Definition: hevc_filter.c:723
SliceHeader::slice_segment_addr
unsigned int slice_segment_addr
address (in raster order) of the first block in the current slice
Definition: hevcdec.h:256
hevc_parse.h
MvField::ref_idx
int8_t ref_idx[2]
Definition: hevcdec.h:354
SAOParams::eo_class
int eo_class[3]
sao_eo_class
Definition: hevcdsp.h:40
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:88
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:471
hevc_luma_mv_mvp_mode
static void hevc_luma_mv_mvp_mode(HEVCLocalContext *lc, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv)
Definition: hevcdec.c:1864
ff_thread_report_progress2
void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n)
Definition: pthread_slice.c:210
first
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But first
Definition: rate_distortion.txt:12
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:1015
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
QPEL_EXTRA_AFTER
#define QPEL_EXTRA_AFTER
Definition: hevcdec.h:64
HEVC_NAL_BLA_N_LP
@ HEVC_NAL_BLA_N_LP
Definition: hevc.h:47
SAOParams::type_idx
uint8_t type_idx[3]
sao_type_idx
Definition: hevcdsp.h:44
film_grain_params.h
HEVC_MAX_PPS_COUNT
@ HEVC_MAX_PPS_COUNT
Definition: hevc.h:114
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
TransformUnit::intra_pred_mode
int intra_pred_mode
Definition: hevcdec.h:383
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
HEVC_NAL_RADL_R
@ HEVC_NAL_RADL_R
Definition: hevc.h:36
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:628
hls_prediction_unit
static void hls_prediction_unit(HEVCLocalContext *lc, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int partIdx, int idx)
Definition: hevcdec.c:1909
hevc_ref_frame
static int hevc_ref_frame(HEVCFrame *dst, HEVCFrame *src)
Definition: hevcdec.c:3390
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:589
SliceHeader::cabac_init_flag
uint8_t cabac_init_flag
Definition: hevcdec.h:287
H2645NAL::size
int size
Definition: h2645_parse.h:36
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:543
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:744
hls_transform_unit
static int hls_transform_unit(HEVCLocalContext *lc, int x0, int y0, int xBase, int yBase, int cb_xBase, int cb_yBase, int log2_cb_size, int log2_trafo_size, int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
Definition: hevcdec.c:1139
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:306
QPEL_EXTRA_BEFORE
#define QPEL_EXTRA_BEFORE
Definition: hevcdec.h:63
ff_hevc_rem_intra_luma_pred_mode_decode
int ff_hevc_rem_intra_luma_pred_mode_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:773
ff_hevc_sao_merge_flag_decode
int ff_hevc_sao_merge_flag_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:571
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:127
HEVCLocalContext::parent
const struct HEVCContext * parent
Definition: hevcdec.h:442
s
#define s(width, name)
Definition: cbs_vp9.c:198
ff_thread_await_progress2
void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift)
Definition: pthread_slice.c:222
SAO_NOT_APPLIED
@ SAO_NOT_APPLIED
Definition: hevcdec.h:210
AV_PROFILE_HEVC_SCC
#define AV_PROFILE_HEVC_SCC
Definition: defs.h:162
set_sps
static int set_sps(HEVCContext *s, const HEVCSPS *sps, enum AVPixelFormat pix_fmt)
Definition: hevcdec.c:523
AV_ZERO32
#define AV_ZERO32(d)
Definition: intreadwrite.h:627
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demux_decode.c:41
ff_hevc_nal_is_nonref
static av_always_inline int ff_hevc_nal_is_nonref(enum HEVCNALUnitType type)
Definition: hevcdec.h:666
ff_hevc_set_new_ref
int ff_hevc_set_new_ref(HEVCContext *s, AVFrame **frame, int poc)
Definition: hevc_refs.c:130
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
SliceHeader::slice_rps
ShortTermRPS slice_rps
Definition: hevcdec.h:272
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
AVPacketSideData::data
uint8_t * data
Definition: packet.h:343
HEVCFrame::rpl
RefPicListTab * rpl
RefStruct reference.
Definition: hevcdec.h:417
decode.h
IS_IDR
#define IS_IDR(s)
Definition: hevcdec.h:75
H2645NAL::data
const uint8_t * data
Definition: h2645_parse.h:35
ff_hevc_slice_rpl
int ff_hevc_slice_rpl(HEVCContext *s)
Construct the reference picture list(s) for the current slice.
Definition: hevc_refs.c:306
RefPicList::ref
struct HEVCFrame * ref[HEVC_MAX_REFS]
Definition: hevcdec.h:242
H2645NAL::skipped_bytes_pos
int * skipped_bytes_pos
Definition: h2645_parse.h:71
HEVCWindow::top_offset
unsigned int top_offset
Definition: hevc_ps.h:89
HEVC_SLICE_I
@ HEVC_SLICE_I
Definition: hevc.h:98
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
SliceHeader::size
int * size
Definition: hevcdec.h:312
ff_hevc_cabac_init
int ff_hevc_cabac_init(HEVCLocalContext *lc, int ctb_addr_ts)
Definition: hevc_cabac.c:512
ff_hevc_set_neighbour_available
void ff_hevc_set_neighbour_available(HEVCLocalContext *lc, int x0, int y0, int nPbW, int nPbH)
Definition: hevc_mvs.c:43
SliceHeader::collocated_list
uint8_t collocated_list
Definition: hevcdec.h:290
atomic_load
#define atomic_load(object)
Definition: stdatomic.h:93
AVCOL_PRI_UNSPECIFIED
@ AVCOL_PRI_UNSPECIFIED
Definition: pixfmt.h:548
AVDISCARD_BIDIR
@ AVDISCARD_BIDIR
discard all bidirectional frames
Definition: defs.h:216
get_se_golomb
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:239
INTRA_ANGULAR_26
@ INTRA_ANGULAR_26
Definition: hevcdec.h:198
H2645NAL::type
int type
NAL unit type.
Definition: h2645_parse.h:52
CodingUnit::max_trafo_depth
uint8_t max_trafo_depth
MaxTrafoDepth.
Definition: hevcdec.h:343
AV_FRAME_DATA_DYNAMIC_HDR_VIVID
@ AV_FRAME_DATA_DYNAMIC_HDR_VIVID
HDR Vivid dynamic metadata associated with a video frame.
Definition: frame.h:211
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
SliceHeader::slice_ctb_addr_rs
int slice_ctb_addr_rs
Definition: hevcdec.h:331
frame
static AVFrame * frame
Definition: demux_decode.c:54
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:871
FF_CODEC_PROPERTY_FILM_GRAIN
#define FF_CODEC_PROPERTY_FILM_GRAIN
Definition: avcodec.h:1907
arg
const char * arg
Definition: jacosubdec.c:67
if
if(ret)
Definition: filter_design.txt:179
HEVC_NAL_IDR_N_LP
@ HEVC_NAL_IDR_N_LP
Definition: hevc.h:49
SliceHeader::pic_output_flag
uint8_t pic_output_flag
Definition: hevcdec.h:266
ff_hevc_cbf_cb_cr_decode
int ff_hevc_cbf_cb_cr_decode(HEVCLocalContext *lc, int trafo_depth)
Definition: hevc_cabac.c:884
hls_slice_data_wpp
static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
Definition: hevcdec.c:2671
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:110
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:219
HEVC_MAX_REFS
@ HEVC_MAX_REFS
Definition: hevc.h:119
threadframe.h
PredictionUnit::rem_intra_luma_pred_mode
int rem_intra_luma_pred_mode
Definition: hevcdec.h:369
H2645NAL::raw_size
int raw_size
Definition: h2645_parse.h:44
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
IS_BLA
#define IS_BLA(s)
Definition: hevcdec.h:76
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_bswapdsp_init
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
HEVC_SLICE_B
@ HEVC_SLICE_B
Definition: hevc.h:96
NULL
#define NULL
Definition: coverity.c:32
HEVC_SEQUENCE_COUNTER_MASK
#define HEVC_SEQUENCE_COUNTER_MASK
Definition: hevcdec.h:403
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
hls_coding_unit
static int hls_coding_unit(HEVCLocalContext *lc, const HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:2223
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1039
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
HEVCLocalContext::tmp
int16_t tmp[MAX_PB_SIZE *MAX_PB_SIZE]
Definition: hevcdec.h:476
ff_hevc_ps_uninit
void ff_hevc_ps_uninit(HEVCParamSets *ps)
Definition: hevc_ps.c:1999
hwaccel_internal.h
HEVC_NAL_PPS
@ HEVC_NAL_PPS
Definition: hevc.h:63
LongTermRPS::poc
int poc[32]
Definition: hevcdec.h:235
AVCHROMA_LOC_LEFT
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
Definition: pixfmt.h:694
CodingUnit::cu_transquant_bypass_flag
uint8_t cu_transquant_bypass_flag
Definition: hevcdec.h:344
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:476
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
HEVCLocalContext::first_qp_group
uint8_t first_qp_group
Definition: hevcdec.h:439
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
ff_dovi_update_cfg
void ff_dovi_update_cfg(DOVIContext *s, const AVDOVIDecoderConfigurationRecord *cfg)
Read the contents of an AVDOVIDecoderConfigurationRecord (usually provided by stream side data) and u...
Definition: dovi_rpu.c:75
profiles.h
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:109
L0
#define L0
Definition: hevcdec.h:57
HEVCFrame::rpl_tab
RefPicListTab ** rpl_tab
RefStruct reference.
Definition: hevcdec.h:413
LongTermRPS::poc_msb_present
uint8_t poc_msb_present[32]
Definition: hevcdec.h:236
HEVC_NAL_SEI_SUFFIX
@ HEVC_NAL_SEI_SUFFIX
Definition: hevc.h:69
ff_hevc_sao_band_position_decode
int ff_hevc_sao_band_position_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:586
HEVC_NAL_CRA_NUT
@ HEVC_NAL_CRA_NUT
Definition: hevc.h:50
av_frame_new_side_data_from_buf
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
Definition: frame.c:780
hevc_pel_weight
static const uint8_t hevc_pel_weight[65]
Definition: hevcdec.c:54
PART_Nx2N
@ PART_Nx2N
Definition: hevcdec.h:144
RefPicListTab
Definition: hevcdec.h:248
ff_hevc_split_coding_unit_flag_decode
int ff_hevc_split_coding_unit_flag_decode(HEVCLocalContext *lc, int ct_depth, int x0, int y0)
Definition: hevc_cabac.c:697
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:281
BOUNDARY_UPPER_TILE
#define BOUNDARY_UPPER_TILE
Definition: hevcdec.h:486
vps
static int FUNC() vps(CodedBitstreamContext *ctx, RWContext *rw, H265RawVPS *current)
Definition: cbs_h265_syntax_template.c:423
ff_hevc_decode_extradata
int ff_hevc_decode_extradata(const uint8_t *data, int size, HEVCParamSets *ps, HEVCSEI *sei, int *is_nalff, int *nal_length_size, int err_recognition, int apply_defdispwin, void *logctx)
Definition: hevc_parse.c:80
AV_EF_CRCCHECK
#define AV_EF_CRCCHECK
Verify checksums embedded in the bitstream (could be of either encoded or decoded data,...
Definition: defs.h:48
FF_HW_HAS_CB
#define FF_HW_HAS_CB(avctx, function)
Definition: hwaccel_internal.h:177
SliceHeader::nb_refs
unsigned int nb_refs[2]
Definition: hevcdec.h:282
Mv::x
int16_t x
horizontal component of motion vector
Definition: hevcdec.h:348
ff_slice_thread_init_progress
int av_cold ff_slice_thread_init_progress(AVCodecContext *avctx)
Definition: pthread_slice.c:179
AVCodecContext::level
int level
Encoding level descriptor.
Definition: avcodec.h:1740
hls_sao_param
static void hls_sao_param(HEVCLocalContext *lc, int rx, int ry)
Definition: hevcdec.c:1049
HEVC_NAL_RASL_R
@ HEVC_NAL_RASL_R
Definition: hevc.h:38
PF_BI
@ PF_BI
Definition: hevcdec.h:168
ff_hevc_no_residual_syntax_flag_decode
int ff_hevc_no_residual_syntax_flag_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:841
SAMPLE_CTB
#define SAMPLE_CTB(tab, x, y)
Definition: hevcdec.h:73
HEVCWindow
Definition: hevc_ps.h:86
SCAN_HORIZ
@ SCAN_HORIZ
Definition: hevcdec.h:225
ff_hevc_frame_rps
int ff_hevc_frame_rps(HEVCContext *s)
Construct the reference picture sets for the current frame.
Definition: hevc_refs.c:473
HEVCLocalContext::edge_emu_buffer
uint8_t edge_emu_buffer[(MAX_PB_SIZE+7) *EDGE_EMU_BUFFER_STRIDE *2]
Definition: hevcdec.h:473
hevc_await_progress
static void hevc_await_progress(const HEVCContext *s, const HEVCFrame *ref, const Mv *mv, int y0, int height)
Definition: hevcdec.c:1854
IS_IRAP
#define IS_IRAP(s)
Definition: hevcdec.h:78
LongTermRPS::used
uint8_t used[32]
Definition: hevcdec.h:237
SliceHeader::colour_plane_id
uint8_t colour_plane_id
RPS coded in the slice header itself is stored here.
Definition: hevcdec.h:267
PART_nLx2N
@ PART_nLx2N
Definition: hevcdec.h:148
SliceHeader::dependent_slice_segment_flag
uint8_t dependent_slice_segment_flag
Definition: hevcdec.h:265
POS
#define POS(c_idx, x, y)
SliceHeader::slice_act_cb_qp_offset
int slice_act_cb_qp_offset
Definition: hevcdec.h:299
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:218
SliceHeader::first_slice_in_pic_flag
uint8_t first_slice_in_pic_flag
Definition: hevcdec.h:264
HEVCLocalContext::ctb_left_flag
uint8_t ctb_left_flag
Definition: hevcdec.h:466
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
ff_hevc_res_scale_sign_flag
int ff_hevc_res_scale_sign_flag(HEVCLocalContext *lc, int idx)
Definition: hevc_cabac.c:919
ff_dovi_ctx_flush
void ff_dovi_ctx_flush(DOVIContext *s)
Partially reset the internal state.
Definition: dovi_rpu.c:54
ff_hevc_merge_idx_decode
int ff_hevc_merge_idx_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:794
AVPacket::size
int size
Definition: packet.h:492
BOUNDARY_UPPER_SLICE
#define BOUNDARY_UPPER_SLICE
Definition: hevcdec.h:485
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:106
hevcdec.h
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:361
decode_nal_units
static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
Definition: hevcdec.c:3132
codec_internal.h
SAOParams::offset_abs
int offset_abs[3][4]
sao_offset_abs
Definition: hevcdsp.h:35
AV_PIX_FMT_YUV422P10LE
@ AV_PIX_FMT_YUV422P10LE
planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
Definition: pixfmt.h:151
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
INTRA_PLANAR
@ INTRA_PLANAR
Definition: hevcdec.h:172
ff_hevc_decode_nal_sps
int ff_hevc_decode_nal_sps(GetBitContext *gb, AVCodecContext *avctx, HEVCParamSets *ps, int apply_defdispwin)
Definition: hevc_ps.c:1270
PART_2NxnD
@ PART_2NxnD
Definition: hevcdec.h:147
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:473
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
size
int size
Definition: twinvq_data.h:10344
HEVC_NAL_BLA_W_LP
@ HEVC_NAL_BLA_W_LP
Definition: hevc.h:45
SCAN_VERT
@ SCAN_VERT
Definition: hevcdec.h:226
FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
Definition: codec_internal.h:69
ff_hevc_compute_poc
int ff_hevc_compute_poc(const HEVCSPS *sps, int pocTid0, int poc_lsb, int nal_unit_type)
Compute POC of the current frame and return it.
Definition: hevc_ps.c:2015
H2645NAL::gb
GetBitContext gb
Definition: h2645_parse.h:47
SliceHeader::collocated_ref_idx
unsigned int collocated_ref_idx
Definition: hevcdec.h:292
SliceHeader::entry_point_offset
unsigned * entry_point_offset
Definition: hevcdec.h:310
H2645NAL
Definition: h2645_parse.h:34
ff_hevc_cbf_luma_decode
int ff_hevc_cbf_luma_decode(HEVCLocalContext *lc, int trafo_depth)
Definition: hevc_cabac.c:889
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:475
ff_hevc_decode_nal_vps
int ff_hevc_decode_nal_vps(GetBitContext *gb, AVCodecContext *avctx, HEVCParamSets *ps)
Definition: hevc_ps.c:441
pic_arrays_free
static void pic_arrays_free(HEVCContext *s)
NOTE: Each function hls_foo correspond to the function foo in the specification (HLS stands for High ...
Definition: hevcdec.c:66
ff_hevc_luma_mv_merge_mode
void ff_hevc_luma_mv_merge_mode(HEVCLocalContext *lc, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv)
Definition: hevc_mvs.c:480
AVFrameSideData::data
uint8_t * data
Definition: frame.h:248
TransformUnit::chroma_mode_c
int chroma_mode_c
Definition: hevcdec.h:385
ff_hevc_prev_intra_luma_pred_flag_decode
int ff_hevc_prev_intra_luma_pred_flag_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:760
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1544
AVFilmGrainParams
This structure describes how to handle film grain synthesis in video for specific codecs.
Definition: film_grain_params.h:216
GetBitContext::index
int index
Definition: get_bits.h:110
SliceHeader::short_term_ref_pic_set_sps_flag
int short_term_ref_pic_set_sps_flag
Definition: hevcdec.h:270
AVCHROMA_LOC_UNSPECIFIED
@ AVCHROMA_LOC_UNSPECIFIED
Definition: pixfmt.h:693
SliceHeader::no_output_of_prior_pics_flag
uint8_t no_output_of_prior_pics_flag
Definition: hevcdec.h:279
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:427
AVCodecHWConfigInternal
Definition: hwconfig.h:25
MvField
Definition: hevcdec.h:352
QPEL_EXTRA
#define QPEL_EXTRA
Definition: hevcdec.h:65
ff_hevc_end_of_slice_flag_decode
int ff_hevc_end_of_slice_flag_decode(HEVCLocalContext *lc)
Definition: hevc_cabac.c:618
PF_L1
@ PF_L1
Definition: hevcdec.h:167
intra_prediction_unit_default_value
static void intra_prediction_unit_default_value(HEVCLocalContext *lc, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:2200
split
static char * split(char *message, char delim)
Definition: af_channelmap.c:80
get_format
static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
Definition: hevcdec.c:397
ff_h2645_packet_split
int ff_h2645_packet_split(H2645Packet *pkt, const uint8_t *buf, int length, void *logctx, int is_nalff, int nal_length_size, enum AVCodecID codec_id, int small_padding, int use_ref)
Split an input packet into NAL units.
Definition: h2645_parse.c:463
height
#define height
hevc_frame_end
static int hevc_frame_end(HEVCContext *s)
Definition: hevcdec.c:2925
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:114
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:78
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
attributes.h
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:333
av_buffer_alloc
AVBufferRef * av_buffer_alloc(size_t size)
Allocate an AVBuffer of the given size using av_malloc().
Definition: buffer.c:77
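A minimal sketch of the reference-counted buffer API; 'size' is an assumed caller-provided value, and note that av_buffer_alloc() does not zero the buffer contents.
    AVBufferRef *buf = av_buffer_alloc(size);
    if (!buf)
        return AVERROR(ENOMEM);
    memset(buf->data, 0, buf->size);  /* contents are uninitialized after allocation */
    /* ... use buf->data ... */
    av_buffer_unref(&buf);            /* drops this reference; freed on the last unref */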
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
hls_slice_data
static int hls_slice_data(HEVCContext *s)
Definition: hevcdec.c:2581
TransformUnit::cu_qp_offset_cb
int8_t cu_qp_offset_cb
Definition: hevcdec.h:388
pic_arrays_init
static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
Definition: hevcdec.c:94
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
MvField::pred_flag
int8_t pred_flag
Definition: hevcdec.h:355
HEVCLocalContext::ct_depth
int ct_depth
Definition: hevcdec.h:478
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1543
ff_init_cabac_decoder
int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Definition: cabac.c:162
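A minimal sketch, assuming 'data' and 'size' point at the CABAC-coded slice data; the error handling is illustrative.
    CABACContext cc;
    int ret = ff_init_cabac_decoder(&cc, data, size);
    if (ret < 0)
        return ret;  /* e.g. an invalid buffer size */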
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:187
PART_nRx2N
@ PART_nRx2N
Definition: hevcdec.h:149
EPEL_EXTRA_BEFORE
#define EPEL_EXTRA_BEFORE
Definition: hevcdec.h:60
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:302
SliceHeader::slice_cb_qp_offset
int slice_cb_qp_offset
Definition: hevcdec.h:295
SliceHeader
Definition: hevcdec.h:252
HEVCFrame::frame
AVFrame * frame
Definition: hevcdec.h:407
HEVC_NAL_TRAIL_R
@ HEVC_NAL_TRAIL_R
Definition: hevc.h:30
hevc_frame_start
static int hevc_frame_start(HEVCContext *s)
Definition: hevcdec.c:2843
av_md5_init
void av_md5_init(AVMD5 *ctx)
Initialize MD5 hashing.
Definition: md5.c:143
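A minimal sketch of the libavutil MD5 API as a whole (alloc, init, update, final); 'data' and 'len' are illustrative inputs. hevcdec.c uses this kind of hashing to check decoded pictures against the decoded-picture-hash SEI.
    uint8_t digest[16];
    struct AVMD5 *md5 = av_md5_alloc();
    if (!md5)
        return AVERROR(ENOMEM);
    av_md5_init(md5);
    av_md5_update(md5, data, len);
    av_md5_final(md5, digest);
    av_freep(&md5);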
ff_h274_apply_film_grain
int ff_h274_apply_film_grain(AVFrame *out_frame, const AVFrame *in_frame, H274FilmGrainDatabase *database, const AVFilmGrainParams *params)
Definition: h274.c:217
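A minimal sketch, assuming 'out' and 'in' are compatible AVFrames and 'db' is a persistent H274FilmGrainDatabase kept in the decoder context; the side-data lookup shown here is an illustrative way to obtain the film grain parameters.
    AVFrameSideData *sd = av_frame_get_side_data(in, AV_FRAME_DATA_FILM_GRAIN_PARAMS);
    if (sd) {
        const AVFilmGrainParams *fgp = (const AVFilmGrainParams *)sd->data;
        int ret = ff_h274_apply_film_grain(out, in, db, fgp);
        if (ret < 0)
            return ret;
    }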
SliceHeader::slice_sample_adaptive_offset_flag
uint8_t slice_sample_adaptive_offset_flag[3]
Definition: hevcdec.h:284
AVDISCARD_NONINTRA
@ AVDISCARD_NONINTRA
discard all non-intra frames
Definition: defs.h:217
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:245
av_timecode_make_smpte_tc_string2
char * av_timecode_make_smpte_tc_string2(char *buf, AVRational rate, uint32_t tcsmpte, int prevent_df, int skip_field)
Get the timecode string from the SMPTE timecode format.
Definition: timecode.c:138
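A minimal sketch, assuming a 25 fps stream and a SMPTE timecode value 'tcsmpte' obtained e.g. from a Time code SEI message; the prevent_df/skip_field flag values are illustrative.
    char tcbuf[AV_TIMECODE_STR_SIZE];
    av_timecode_make_smpte_tc_string2(tcbuf, (AVRational){ 25, 1 }, tcsmpte, 0, 0);
    av_log(avctx, AV_LOG_INFO, "timecode: %s\n", tcbuf);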
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1904
HEVCFrame
Definition: hevcdec.h:406
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:542
av_packet_get_side_data
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, size_t *size)
Get side information from packet.
Definition: avpacket.c:252
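A minimal sketch that probes a packet for updated extradata; the side-data type and the handling are illustrative assumptions.
    size_t sd_size;
    uint8_t *sd = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &sd_size);
    if (sd && sd_size > 0) {
        /* re-parse parameter sets from the new extradata */
    }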
HEVCLocalContext::gb
GetBitContext gb
Definition: hevcdec.h:444
internal.h
EPEL_EXTRA_AFTER
#define EPEL_EXTRA_AFTER
Definition: hevcdec.h:61
HEVCFrame::nb_rpl_elems
int nb_rpl_elems
Definition: hevcdec.h:418
HEVCFrame::ctb_count
int ctb_count
Definition: hevcdec.h:414
src2
const pixel * src2
Definition: h264pred_template.c:422
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
SliceHeader::offset
int * offset
Definition: hevcdec.h:311
common.h
HEVCFrame::sequence
uint16_t sequence
A sequence counter, so that old frames are output first after a POC reset.
Definition: hevcdec.h:426
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
SliceHeader::mvd_l1_zero_flag
uint8_t mvd_l1_zero_flag