FFmpeg
vp9.c
Go to the documentation of this file.
1 /*
2  * VP9 compatible video decoder
3  *
4  * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
5  * Copyright (C) 2013 Clément Bœsch <u pkh me>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "config_components.h"
25 
26 #include "avcodec.h"
27 #include "codec_internal.h"
28 #include "decode.h"
29 #include "get_bits.h"
30 #include "hwaccel_internal.h"
31 #include "hwconfig.h"
32 #include "profiles.h"
33 #include "refstruct.h"
34 #include "thread.h"
35 #include "threadframe.h"
36 #include "pthread_internal.h"
37 
38 #include "videodsp.h"
39 #include "vp89_rac.h"
40 #include "vp9.h"
41 #include "vp9data.h"
42 #include "vp9dec.h"
43 #include "vpx_rac.h"
44 #include "libavutil/avassert.h"
45 #include "libavutil/pixdesc.h"
47 
48 #define VP9_SYNCCODE 0x498342
49 
50 #if HAVE_THREADS
51 DEFINE_OFFSET_ARRAY(VP9Context, vp9_context, pthread_init_cnt,
52  (offsetof(VP9Context, progress_mutex)),
53  (offsetof(VP9Context, progress_cond)));
54 
55 static int vp9_alloc_entries(AVCodecContext *avctx, int n) {
56  VP9Context *s = avctx->priv_data;
57  int i;
58 
59  if (avctx->active_thread_type & FF_THREAD_SLICE) {
60  if (s->entries)
61  av_freep(&s->entries);
62 
63  s->entries = av_malloc_array(n, sizeof(atomic_int));
64  if (!s->entries)
65  return AVERROR(ENOMEM);
66 
67  for (i = 0; i < n; i++)
68  atomic_init(&s->entries[i], 0);
69  }
70  return 0;
71 }
72 
/*
 * Publish decoding progress: add n to the counter for 'field' and wake any
 * thread blocked in vp9_await_tile_progress().
 *
 * The release ordering on the fetch-add pairs with the acquire load in the
 * await function's fast path, so data written before the report is visible
 * to the awaiting thread.  The counter is updated while holding the mutex
 * so a waiter cannot miss the signal between its re-check and cond_wait.
 */
static void vp9_report_tile_progress(VP9Context *s, int field, int n) {
    pthread_mutex_lock(&s->progress_mutex);
    atomic_fetch_add_explicit(&s->entries[field], n, memory_order_release);
    pthread_cond_signal(&s->progress_cond);
    pthread_mutex_unlock(&s->progress_mutex);
}
79 
/*
 * Block until the progress counter for 'field' reaches n.
 *
 * Fast path: an acquire load (pairing with the release add in
 * vp9_report_tile_progress()) lets us return without taking the mutex once
 * enough progress is visible.  Slow path: sleep on the condition variable;
 * the relaxed re-load inside the loop is acceptable because the mutex
 * already orders it against the reporting thread.
 *
 * NOTE(review): the fast path tests ">= n" while the wait loop tests
 * "!= n"; presumably the counter never overshoots the awaited value while
 * a waiter holds the mutex -- verify against the slice-threading callers.
 */
static void vp9_await_tile_progress(VP9Context *s, int field, int n) {
    if (atomic_load_explicit(&s->entries[field], memory_order_acquire) >= n)
        return;

    pthread_mutex_lock(&s->progress_mutex);
    while (atomic_load_explicit(&s->entries[field], memory_order_relaxed) != n)
        pthread_cond_wait(&s->progress_cond, &s->progress_mutex);
    pthread_mutex_unlock(&s->progress_mutex);
}
89 #else
90 static int vp9_alloc_entries(AVCodecContext *avctx, int n) { return 0; }
91 #endif
92 
/* NOTE(review): the function signature line was lost in this extraction;
 * upstream FFmpeg declares this as vp9_tile_data_free(VP9TileData *td) --
 * confirm before editing.
 *
 * Frees the per-tile scratch buffers.  av_freep() also NULLs the pointers,
 * so a later allocation pass can detect them as unset. */
{
    av_freep(&td->b_base);
    av_freep(&td->block_base);
    av_freep(&td->block_structure);
}
99 
/* NOTE(review): the function signature line is missing from this extraction
 * (upstream: a vp9_frame_unref() taking the VP9Frame *f -- verify).
 *
 * Releases the refcounted per-frame buffers. */
{
    ff_refstruct_unref(&f->extradata);
    ff_refstruct_unref(&f->hwaccel_picture_private);
    // segmentation_map aliases into f->extradata (see vp9_frame_alloc), so
    // it must not be freed separately -- just cleared.
    f->segmentation_map = NULL;
}
107 
/* NOTE(review): this extraction dropped several lines of this function:
 * the signature, the frame-buffer allocation call that sets 'ret' before
 * the first error check, the trailing argument(s) of
 * ff_refstruct_pool_alloc() (note the dangling comma below), and the
 * cleanup call under "fail:".  Code is documented as-is; consult upstream
 * vp9.c before editing. */
{
    VP9Context *s = avctx->priv_data;
    int ret, sz;

    /* NOTE(review): a statement is missing here -- presumably the call
     * that allocates the actual frame buffer and assigns 'ret'; as shown,
     * 'ret' would be read uninitialized. */
    if (ret < 0)
        return ret;

    // One byte of segmentation map per 8x8 block: 64 entries per 64x64
    // superblock.
    sz = 64 * s->sb_cols * s->sb_rows;
    if (sz != s->frame_extradata_pool_size) {
        ff_refstruct_pool_uninit(&s->frame_extradata_pool);
        // Pool elements hold the segmentation map (1 byte per block)
        // followed by the motion-vector reference pairs.
        s->frame_extradata_pool = ff_refstruct_pool_alloc(sz * (1 + sizeof(VP9mvrefPair)),
        /* NOTE(review): the second argument and closing parenthesis of the
         * pool_alloc call above are missing from this extraction. */
        if (!s->frame_extradata_pool) {
            s->frame_extradata_pool_size = 0;
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        s->frame_extradata_pool_size = sz;
    }
    f->extradata = ff_refstruct_pool_get(s->frame_extradata_pool);
    if (!f->extradata) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    // Both views alias the single pooled allocation: map first, MVs after.
    f->segmentation_map = f->extradata;
    f->mv = (VP9mvrefPair *) ((char*)f->extradata + sz);

    ret = ff_hwaccel_frame_priv_alloc(avctx, &f->hwaccel_picture_private);
    if (ret < 0)
        goto fail;

    return 0;

fail:
    /* NOTE(review): a cleanup call (presumably unreferencing the partially
     * set up frame) is missing here in this extraction. */
    return ret;
}
148 
/*
 * Make 'dst' an additional reference to the frame held by 'src'.
 * Takes a new reference on the underlying frame and extradata buffers;
 * plain pointer copies are sufficient for the fields that alias them.
 */
static int vp9_frame_ref(VP9Frame *dst, VP9Frame *src)
{
    int ret;

    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
    if (ret < 0)
        return ret;

    dst->extradata = ff_refstruct_ref(src->extradata);

    // These point into extradata, so copying the pointers is safe once the
    // reference above has been taken.
    dst->segmentation_map = src->segmentation_map;
    dst->mv = src->mv;
    dst->uses_2pass = src->uses_2pass;

    /* NOTE(review): a line is missing here in this extraction -- the
     * dangling argument below presumably belongs to an
     * ff_refstruct_replace(&dst->hwaccel_picture_private, ...) call;
     * confirm against upstream. */
                            src->hwaccel_picture_private);

    return 0;
}
168 
/*
 * (Re)negotiate the output pixel format and (re)allocate the per-column
 * "above" context buffers when the frame geometry or format changes.
 *
 * NOTE(review): this extraction dropped a few lines inside the pix_fmt
 * switch below -- judging by the dangling hwaccel groups and break
 * statements, additional case labels (presumably the 10/12-bit variants)
 * are missing at the marked spots.  Consult upstream vp9.c before editing.
 */
static int update_size(AVCodecContext *avctx, int w, int h)
{
#define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
                     CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
                     CONFIG_VP9_NVDEC_HWACCEL + \
                     CONFIG_VP9_VAAPI_HWACCEL + \
                     CONFIG_VP9_VDPAU_HWACCEL + \
                     CONFIG_VP9_VIDEOTOOLBOX_HWACCEL)
    // +2: one slot for the software pix_fmt, one for the AV_PIX_FMT_NONE
    // terminator.
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
    VP9Context *s = avctx->priv_data;
    uint8_t *p;
    int bytesperpixel = s->bytesperpixel, ret, cols, rows;
    int lflvl_len, i;

    av_assert0(w > 0 && h > 0);

    // Only renegotiate the format when pix_fmt or dimensions changed.
    if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {
        if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
            return ret;

        switch (s->pix_fmt) {
        case AV_PIX_FMT_YUV420P:
        /* NOTE(review): at least one case label is missing here in this
         * extraction. */
#if CONFIG_VP9_DXVA2_HWACCEL
            *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_VP9_D3D11VA_HWACCEL
            *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
            *fmtp++ = AV_PIX_FMT_D3D11;
#endif
#if CONFIG_VP9_NVDEC_HWACCEL
            *fmtp++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_VP9_VDPAU_HWACCEL
            *fmtp++ = AV_PIX_FMT_VDPAU;
#endif
#if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
            *fmtp++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
            break;
        /* NOTE(review): the case label(s) introducing this group are
         * missing from this extraction. */
#if CONFIG_VP9_NVDEC_HWACCEL
            *fmtp++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_VP9_VDPAU_HWACCEL
            *fmtp++ = AV_PIX_FMT_VDPAU;
#endif
            break;
        case AV_PIX_FMT_YUV444P:
        /* NOTE(review): additional case labels (10/12-bit 4:4:4?) appear
         * to be missing here in this extraction. */
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
            break;
        case AV_PIX_FMT_GBRP:
        case AV_PIX_FMT_GBRP10:
        case AV_PIX_FMT_GBRP12:
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
            break;
        }

        // Software format last, then terminator; ff_get_format() selects
        // the best supported entry from this list.
        *fmtp++ = s->pix_fmt;
        *fmtp = AV_PIX_FMT_NONE;

        ret = ff_get_format(avctx, pix_fmts);
        if (ret < 0)
            return ret;

        avctx->pix_fmt = ret;
        s->gf_fmt = s->pix_fmt;
        s->w = w;
        s->h = h;
    }

    cols = (w + 7) >> 3;   // width in 8x8-block units
    rows = (h + 7) >> 3;

    // Context buffers are still valid if geometry and format are unchanged.
    if (s->intra_pred_data[0] && cols == s->cols && rows == s->rows && s->pix_fmt == s->last_fmt)
        return 0;

    s->last_fmt = s->pix_fmt;
    s->sb_cols = (w + 63) >> 6;   // width in 64x64-superblock units
    s->sb_rows = (h + 63) >> 6;
    s->cols = (w + 7) >> 3;
    s->rows = (h + 7) >> 3;
    lflvl_len = avctx->active_thread_type == FF_THREAD_SLICE ? s->sb_rows : 1;

    // Carve all the per-column "above" context arrays out of one
    // allocation; each assign() advances p by sb_cols * n elements.
#define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
    av_freep(&s->intra_pred_data[0]);
    // FIXME we slightly over-allocate here for subsampled chroma, but a little
    // bit of padding shouldn't affect performance...
    p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
                                lflvl_len * sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
    if (!p)
        return AVERROR(ENOMEM);
    assign(s->intra_pred_data[0], uint8_t *, 64 * bytesperpixel);
    assign(s->intra_pred_data[1], uint8_t *, 64 * bytesperpixel);
    assign(s->intra_pred_data[2], uint8_t *, 64 * bytesperpixel);
    assign(s->above_y_nnz_ctx, uint8_t *, 16);
    assign(s->above_mode_ctx, uint8_t *, 16);
    assign(s->above_mv_ctx, VP9mv(*)[2], 16);
    assign(s->above_uv_nnz_ctx[0], uint8_t *, 16);
    assign(s->above_uv_nnz_ctx[1], uint8_t *, 16);
    assign(s->above_partition_ctx, uint8_t *, 8);
    assign(s->above_skip_ctx, uint8_t *, 8);
    assign(s->above_txfm_ctx, uint8_t *, 8);
    assign(s->above_segpred_ctx, uint8_t *, 8);
    assign(s->above_intra_ctx, uint8_t *, 8);
    assign(s->above_comp_ctx, uint8_t *, 8);
    assign(s->above_ref_ctx, uint8_t *, 8);
    assign(s->above_filter_ctx, uint8_t *, 8);
    assign(s->lflvl, VP9Filter *, lflvl_len);
#undef assign

    // Geometry changed, so any existing per-tile scratch buffers are stale.
    if (s->td) {
        for (i = 0; i < s->active_tile_cols; i++)
            vp9_tile_data_free(&s->td[i]);
    }

    // Re-init the DSP contexts only when the bit depth actually changed.
    if (s->s.h.bpp != s->last_bpp) {
        ff_vp9dsp_init(&s->dsp, s->s.h.bpp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
        ff_videodsp_init(&s->vdsp, s->s.h.bpp);
        s->last_bpp = s->s.h.bpp;
    }

    return 0;
}
305 
/* NOTE(review): this extraction dropped the function signature line, a
 * statement after the early-return check, and the opening "if (...) {"
 * lines guarding the two block_structure allocations (note the unmatched
 * closing braces below).  Code is documented as-is; consult upstream
 * vp9.c before editing.
 *
 * (Re)allocates the per-tile coefficient/EOB scratch buffers, sized for
 * either one superblock per tile (normal decoding) or the whole frame
 * (2-pass decoding). */
{
    int i;
    VP9Context *s = avctx->priv_data;
    int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;
    VP9TileData *td = &s->td[0];

    // Nothing to do if buffers exist and the 2-pass mode did not change.
    if (td->b_base && td->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
        return 0;

    /* NOTE(review): a statement is missing here -- presumably freeing the
     * old td buffers before re-allocation; verify against upstream. */
    // Chroma sizes shrink with each active subsampling dimension.
    chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
    chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v);
    if (s->s.frames[CUR_FRAME].uses_2pass) {
        int sbs = s->sb_cols * s->sb_rows;

        // 2-pass mode: allocate enough for the whole frame at once.
        td->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
        td->block_base = av_mallocz(((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
                                     16 * 16 + 2 * chroma_eobs) * sbs);
        if (!td->b_base || !td->block_base)
            return AVERROR(ENOMEM);
        // Sub-buffers carved out of block_base: Y coefs, U, V, then EOBs.
        td->uvblock_base[0] = td->block_base + sbs * 64 * 64 * bytesperpixel;
        td->uvblock_base[1] = td->uvblock_base[0] + sbs * chroma_blocks * bytesperpixel;
        td->eob_base = (uint8_t *) (td->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
        td->uveob_base[0] = td->eob_base + 16 * 16 * sbs;
        td->uveob_base[1] = td->uveob_base[0] + chroma_eobs * sbs;

        /* NOTE(review): an "if (...) {" line is missing here (matching the
         * closing brace after the ENOMEM check below) -- verify the guard
         * condition against upstream. */
            td->block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
            if (!td->block_structure)
                return AVERROR(ENOMEM);
        }
    } else {
        // Single-pass: one superblock worth of scratch per active tile
        // column; drop buffers of tiles beyond the first.
        for (i = 1; i < s->active_tile_cols; i++)
            vp9_tile_data_free(&s->td[i]);

        for (i = 0; i < s->active_tile_cols; i++) {
            s->td[i].b_base = av_malloc(sizeof(VP9Block));
            s->td[i].block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
                                             16 * 16 + 2 * chroma_eobs);
            if (!s->td[i].b_base || !s->td[i].block_base)
                return AVERROR(ENOMEM);
            s->td[i].uvblock_base[0] = s->td[i].block_base + 64 * 64 * bytesperpixel;
            s->td[i].uvblock_base[1] = s->td[i].uvblock_base[0] + chroma_blocks * bytesperpixel;
            s->td[i].eob_base = (uint8_t *) (s->td[i].uvblock_base[1] + chroma_blocks * bytesperpixel);
            s->td[i].uveob_base[0] = s->td[i].eob_base + 16 * 16;
            s->td[i].uveob_base[1] = s->td[i].uveob_base[0] + chroma_eobs;

            /* NOTE(review): as above, the "if (...) {" opening this guarded
             * allocation is missing from this extraction. */
                s->td[i].block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
                if (!s->td[i].block_structure)
                    return AVERROR(ENOMEM);
            }
        }
    }
    s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;

    return 0;
}
365 
// The sign bit is at the end, not the start, of a bit sequence
/* NOTE(review): the signature line was lost in this extraction; upstream
 * declares this as an inline helper taking (GetBitContext *gb, int n) --
 * confirm.  Reads an n-bit magnitude followed by one sign bit. */
{
    int v = get_bits(gb, n);
    return get_bits1(gb) ? -v : v;
}
372 
373 static av_always_inline int inv_recenter_nonneg(int v, int m)
374 {
375  if (v > 2 * m)
376  return v;
377  if (v & 1)
378  return m - ((v + 1) >> 1);
379  return m + (v >> 1);
380 }
381 
382 // differential forward probability updates
383 static int update_prob(VPXRangeCoder *c, int p)
384 {
385  static const uint8_t inv_map_table[255] = {
386  7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176,
387  189, 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9,
388  10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24,
389  25, 26, 27, 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39,
390  40, 41, 42, 43, 44, 45, 47, 48, 49, 50, 51, 52, 53, 54,
391  55, 56, 57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
392  70, 71, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
393  86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 99, 100,
394  101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
395  116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
396  131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
397  146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
398  161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
399  177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
400  192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
401  207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
402  222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
403  237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
404  252, 253, 253,
405  };
406  int d;
407 
408  /* This code is trying to do a differential probability update. For a
409  * current probability A in the range [1, 255], the difference to a new
410  * probability of any value can be expressed differentially as 1-A, 255-A
411  * where some part of this (absolute range) exists both in positive as
412  * well as the negative part, whereas another part only exists in one
413  * half. We're trying to code this shared part differentially, i.e.
414  * times two where the value of the lowest bit specifies the sign, and
415  * the single part is then coded on top of this. This absolute difference
416  * then again has a value of [0, 254], but a bigger value in this range
417  * indicates that we're further away from the original value A, so we
418  * can code this as a VLC code, since higher values are increasingly
419  * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
420  * updates vs. the 'fine, exact' updates further down the range, which
421  * adds one extra dimension to this differential update model. */
422 
423  if (!vp89_rac_get(c)) {
424  d = vp89_rac_get_uint(c, 4) + 0;
425  } else if (!vp89_rac_get(c)) {
426  d = vp89_rac_get_uint(c, 4) + 16;
427  } else if (!vp89_rac_get(c)) {
428  d = vp89_rac_get_uint(c, 5) + 32;
429  } else {
430  d = vp89_rac_get_uint(c, 7);
431  if (d >= 65)
432  d = (d << 1) - 65 + vp89_rac_get(c);
433  d += 64;
434  av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
435  }
436 
437  return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
438  255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
439 }
440 
/* NOTE(review): this extraction dropped the function signature line
 * (upstream takes the AVCodecContext -- confirm) and the initializer
 * contents of the three static lookup tables below.  Consult upstream
 * vp9.c before editing.
 *
 * Parses bit depth, colorspace and chroma subsampling from the
 * uncompressed frame header and derives s->pix_fmt. */
{
    static const enum AVColorSpace colorspaces[8] = {
    /* NOTE(review): table contents missing from this extraction. */
    };
    VP9Context *s = avctx->priv_data;
    // Profiles 0/1 are 8-bit only; profiles 2/3 signal 10 vs. 12 bit.
    int bits = avctx->profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12

    s->bpp_index = bits;
    s->s.h.bpp = 8 + bits * 2;
    s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
    avctx->colorspace = colorspaces[get_bits(&s->gb, 3)];
    if (avctx->colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
        static const enum AVPixelFormat pix_fmt_rgb[3] = {
        /* NOTE(review): table contents missing from this extraction. */
        };
        s->ss_h = s->ss_v = 0;
        avctx->color_range = AVCOL_RANGE_JPEG;
        s->pix_fmt = pix_fmt_rgb[bits];
        // RGB is only valid in odd-numbered profiles (1 and 3).
        if (avctx->profile & 1) {
            if (get_bits1(&s->gb)) {
                av_log(avctx, AV_LOG_ERROR, "Reserved bit set in RGB\n");
                return AVERROR_INVALIDDATA;
            }
        } else {
            av_log(avctx, AV_LOG_ERROR, "RGB not supported in profile %d\n",
                   avctx->profile);
            return AVERROR_INVALIDDATA;
        }
    } else {
        static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
        /* NOTE(review): table contents missing from this extraction. */
        };
        // Odd profiles carry explicit subsampling bits; even profiles are
        // fixed 4:2:0.
        if (avctx->profile & 1) {
            s->ss_h = get_bits1(&s->gb);
            s->ss_v = get_bits1(&s->gb);
            s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
            if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
                av_log(avctx, AV_LOG_ERROR, "YUV 4:2:0 not supported in profile %d\n",
                       avctx->profile);
                return AVERROR_INVALIDDATA;
            } else if (get_bits1(&s->gb)) {
                av_log(avctx, AV_LOG_ERROR, "Profile %d color details reserved bit set\n",
                       avctx->profile);
                return AVERROR_INVALIDDATA;
            }
        } else {
            s->ss_h = s->ss_v = 1;
            s->pix_fmt = pix_fmt_for_ss[bits][1][1];
        }
    }

    return 0;
}
502 
504  const uint8_t *data, int size, int *ref)
505 {
506  VP9Context *s = avctx->priv_data;
507  int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
508  int last_invisible;
509  const uint8_t *data2;
510 
511  /* general header */
512  if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
513  av_log(avctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
514  return ret;
515  }
516  if (get_bits(&s->gb, 2) != 0x2) { // frame marker
517  av_log(avctx, AV_LOG_ERROR, "Invalid frame marker\n");
518  return AVERROR_INVALIDDATA;
519  }
520  avctx->profile = get_bits1(&s->gb);
521  avctx->profile |= get_bits1(&s->gb) << 1;
522  if (avctx->profile == 3) avctx->profile += get_bits1(&s->gb);
523  if (avctx->profile > 3) {
524  av_log(avctx, AV_LOG_ERROR, "Profile %d is not yet supported\n", avctx->profile);
525  return AVERROR_INVALIDDATA;
526  }
527  s->s.h.profile = avctx->profile;
528  if (get_bits1(&s->gb)) {
529  *ref = get_bits(&s->gb, 3);
530  return 0;
531  }
532 
533  s->last_keyframe = s->s.h.keyframe;
534  s->s.h.keyframe = !get_bits1(&s->gb);
535 
536  last_invisible = s->s.h.invisible;
537  s->s.h.invisible = !get_bits1(&s->gb);
538  s->s.h.errorres = get_bits1(&s->gb);
539  s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
540 
541  if (s->s.h.keyframe) {
542  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
543  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
544  return AVERROR_INVALIDDATA;
545  }
546  if ((ret = read_colorspace_details(avctx)) < 0)
547  return ret;
548  // for profile 1, here follows the subsampling bits
549  s->s.h.refreshrefmask = 0xff;
550  w = get_bits(&s->gb, 16) + 1;
551  h = get_bits(&s->gb, 16) + 1;
552  if (get_bits1(&s->gb)) // display size
553  skip_bits(&s->gb, 32);
554  } else {
555  s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
556  s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
557  if (s->s.h.intraonly) {
558  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
559  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
560  return AVERROR_INVALIDDATA;
561  }
562  if (avctx->profile >= 1) {
563  if ((ret = read_colorspace_details(avctx)) < 0)
564  return ret;
565  } else {
566  s->ss_h = s->ss_v = 1;
567  s->s.h.bpp = 8;
568  s->bpp_index = 0;
569  s->bytesperpixel = 1;
570  s->pix_fmt = AV_PIX_FMT_YUV420P;
571  avctx->colorspace = AVCOL_SPC_BT470BG;
572  avctx->color_range = AVCOL_RANGE_MPEG;
573  }
574  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
575  w = get_bits(&s->gb, 16) + 1;
576  h = get_bits(&s->gb, 16) + 1;
577  if (get_bits1(&s->gb)) // display size
578  skip_bits(&s->gb, 32);
579  } else {
580  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
581  s->s.h.refidx[0] = get_bits(&s->gb, 3);
582  s->s.h.signbias[0] = get_bits1(&s->gb) && !s->s.h.errorres;
583  s->s.h.refidx[1] = get_bits(&s->gb, 3);
584  s->s.h.signbias[1] = get_bits1(&s->gb) && !s->s.h.errorres;
585  s->s.h.refidx[2] = get_bits(&s->gb, 3);
586  s->s.h.signbias[2] = get_bits1(&s->gb) && !s->s.h.errorres;
587  if (!s->s.refs[s->s.h.refidx[0]].f->buf[0] ||
588  !s->s.refs[s->s.h.refidx[1]].f->buf[0] ||
589  !s->s.refs[s->s.h.refidx[2]].f->buf[0]) {
590  av_log(avctx, AV_LOG_ERROR, "Not all references are available\n");
591  return AVERROR_INVALIDDATA;
592  }
593  if (get_bits1(&s->gb)) {
594  w = s->s.refs[s->s.h.refidx[0]].f->width;
595  h = s->s.refs[s->s.h.refidx[0]].f->height;
596  } else if (get_bits1(&s->gb)) {
597  w = s->s.refs[s->s.h.refidx[1]].f->width;
598  h = s->s.refs[s->s.h.refidx[1]].f->height;
599  } else if (get_bits1(&s->gb)) {
600  w = s->s.refs[s->s.h.refidx[2]].f->width;
601  h = s->s.refs[s->s.h.refidx[2]].f->height;
602  } else {
603  w = get_bits(&s->gb, 16) + 1;
604  h = get_bits(&s->gb, 16) + 1;
605  }
606  // Note that in this code, "CUR_FRAME" is actually before we
607  // have formally allocated a frame, and thus actually represents
608  // the _last_ frame
609  s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f->width == w &&
610  s->s.frames[CUR_FRAME].tf.f->height == h;
611  if (get_bits1(&s->gb)) // display size
612  skip_bits(&s->gb, 32);
613  s->s.h.highprecisionmvs = get_bits1(&s->gb);
614  s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
615  get_bits(&s->gb, 2);
616  s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] ||
617  s->s.h.signbias[0] != s->s.h.signbias[2];
618  if (s->s.h.allowcompinter) {
619  if (s->s.h.signbias[0] == s->s.h.signbias[1]) {
620  s->s.h.fixcompref = 2;
621  s->s.h.varcompref[0] = 0;
622  s->s.h.varcompref[1] = 1;
623  } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) {
624  s->s.h.fixcompref = 1;
625  s->s.h.varcompref[0] = 0;
626  s->s.h.varcompref[1] = 2;
627  } else {
628  s->s.h.fixcompref = 0;
629  s->s.h.varcompref[0] = 1;
630  s->s.h.varcompref[1] = 2;
631  }
632  }
633  }
634  }
635  s->s.h.refreshctx = s->s.h.errorres ? 0 : get_bits1(&s->gb);
636  s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb);
637  s->s.h.framectxid = c = get_bits(&s->gb, 2);
638  if (s->s.h.keyframe || s->s.h.intraonly)
639  s->s.h.framectxid = 0; // BUG: libvpx ignores this field in keyframes
640 
641  /* loopfilter header data */
642  if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) {
643  // reset loopfilter defaults
644  s->s.h.lf_delta.ref[0] = 1;
645  s->s.h.lf_delta.ref[1] = 0;
646  s->s.h.lf_delta.ref[2] = -1;
647  s->s.h.lf_delta.ref[3] = -1;
648  s->s.h.lf_delta.mode[0] = 0;
649  s->s.h.lf_delta.mode[1] = 0;
650  memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat));
651  }
652  s->s.h.filter.level = get_bits(&s->gb, 6);
653  sharp = get_bits(&s->gb, 3);
654  // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
655  // the old cache values since they are still valid
656  if (s->s.h.filter.sharpness != sharp) {
657  for (i = 1; i <= 63; i++) {
658  int limit = i;
659 
660  if (sharp > 0) {
661  limit >>= (sharp + 3) >> 2;
662  limit = FFMIN(limit, 9 - sharp);
663  }
664  limit = FFMAX(limit, 1);
665 
666  s->filter_lut.lim_lut[i] = limit;
667  s->filter_lut.mblim_lut[i] = 2 * (i + 2) + limit;
668  }
669  }
670  s->s.h.filter.sharpness = sharp;
671  if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) {
672  if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) {
673  for (i = 0; i < 4; i++)
674  if (get_bits1(&s->gb))
675  s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
676  for (i = 0; i < 2; i++)
677  if (get_bits1(&s->gb))
678  s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
679  }
680  }
681 
682  /* quantization header data */
683  s->s.h.yac_qi = get_bits(&s->gb, 8);
684  s->s.h.ydc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
685  s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
686  s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
687  s->s.h.lossless = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 &&
688  s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0;
689  if (s->s.h.lossless)
691 
692  /* segmentation header info */
693  if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) {
694  if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) {
695  for (i = 0; i < 7; i++)
696  s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
697  get_bits(&s->gb, 8) : 255;
698  if ((s->s.h.segmentation.temporal = get_bits1(&s->gb)))
699  for (i = 0; i < 3; i++)
700  s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
701  get_bits(&s->gb, 8) : 255;
702  }
703 
704  if (get_bits1(&s->gb)) {
705  s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
706  for (i = 0; i < 8; i++) {
707  if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
708  s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
709  if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
710  s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
711  if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
712  s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
713  s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
714  }
715  }
716  }
717 
718  // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
719  for (i = 0; i < (s->s.h.segmentation.enabled ? 8 : 1); i++) {
720  int qyac, qydc, quvac, quvdc, lflvl, sh;
721 
722  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) {
723  if (s->s.h.segmentation.absolute_vals)
724  qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8);
725  else
726  qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8);
727  } else {
728  qyac = s->s.h.yac_qi;
729  }
730  qydc = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8);
731  quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8);
732  quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8);
733  qyac = av_clip_uintp2(qyac, 8);
734 
735  s->s.h.segmentation.feat[i].qmul[0][0] = ff_vp9_dc_qlookup[s->bpp_index][qydc];
736  s->s.h.segmentation.feat[i].qmul[0][1] = ff_vp9_ac_qlookup[s->bpp_index][qyac];
737  s->s.h.segmentation.feat[i].qmul[1][0] = ff_vp9_dc_qlookup[s->bpp_index][quvdc];
738  s->s.h.segmentation.feat[i].qmul[1][1] = ff_vp9_ac_qlookup[s->bpp_index][quvac];
739 
740  sh = s->s.h.filter.level >= 32;
741  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) {
742  if (s->s.h.segmentation.absolute_vals)
743  lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6);
744  else
745  lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6);
746  } else {
747  lflvl = s->s.h.filter.level;
748  }
749  if (s->s.h.lf_delta.enabled) {
750  s->s.h.segmentation.feat[i].lflvl[0][0] =
751  s->s.h.segmentation.feat[i].lflvl[0][1] =
752  av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
753  for (j = 1; j < 4; j++) {
754  s->s.h.segmentation.feat[i].lflvl[j][0] =
755  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
756  s->s.h.lf_delta.mode[0]) * (1 << sh)), 6);
757  s->s.h.segmentation.feat[i].lflvl[j][1] =
758  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
759  s->s.h.lf_delta.mode[1]) * (1 << sh)), 6);
760  }
761  } else {
762  memset(s->s.h.segmentation.feat[i].lflvl, lflvl,
763  sizeof(s->s.h.segmentation.feat[i].lflvl));
764  }
765  }
766 
767  /* tiling info */
768  if ((ret = update_size(avctx, w, h)) < 0) {
769  av_log(avctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d @ %d\n",
770  w, h, s->pix_fmt);
771  return ret;
772  }
773  for (s->s.h.tiling.log2_tile_cols = 0;
774  s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols);
775  s->s.h.tiling.log2_tile_cols++) ;
776  for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
777  max = FFMAX(0, max - 1);
778  while (max > s->s.h.tiling.log2_tile_cols) {
779  if (get_bits1(&s->gb))
780  s->s.h.tiling.log2_tile_cols++;
781  else
782  break;
783  }
784  s->s.h.tiling.log2_tile_rows = decode012(&s->gb);
785  s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows;
786  if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols)) {
787  int n_range_coders;
788  VPXRangeCoder *rc;
789 
790  if (s->td) {
791  for (i = 0; i < s->active_tile_cols; i++)
792  vp9_tile_data_free(&s->td[i]);
793  av_freep(&s->td);
794  }
795 
796  s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols;
797  s->active_tile_cols = avctx->active_thread_type == FF_THREAD_SLICE ?
798  s->s.h.tiling.tile_cols : 1;
799  vp9_alloc_entries(avctx, s->sb_rows);
800  if (avctx->active_thread_type == FF_THREAD_SLICE) {
801  n_range_coders = 4; // max_tile_rows
802  } else {
803  n_range_coders = s->s.h.tiling.tile_cols;
804  }
805  s->td = av_calloc(s->active_tile_cols, sizeof(VP9TileData) +
806  n_range_coders * sizeof(VPXRangeCoder));
807  if (!s->td)
808  return AVERROR(ENOMEM);
809  rc = (VPXRangeCoder *) &s->td[s->active_tile_cols];
810  for (i = 0; i < s->active_tile_cols; i++) {
811  s->td[i].s = s;
812  s->td[i].c_b = rc;
813  rc += n_range_coders;
814  }
815  }
816 
817  /* check reference frames */
818  if (!s->s.h.keyframe && !s->s.h.intraonly) {
819  int valid_ref_frame = 0;
820  for (i = 0; i < 3; i++) {
821  AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
822  int refw = ref->width, refh = ref->height;
823 
824  if (ref->format != avctx->pix_fmt) {
825  av_log(avctx, AV_LOG_ERROR,
826  "Ref pixfmt (%s) did not match current frame (%s)",
827  av_get_pix_fmt_name(ref->format),
828  av_get_pix_fmt_name(avctx->pix_fmt));
829  return AVERROR_INVALIDDATA;
830  } else if (refw == w && refh == h) {
831  s->mvscale[i][0] = s->mvscale[i][1] = 0;
832  } else {
833  /* Check to make sure at least one of frames that */
834  /* this frame references has valid dimensions */
835  if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
836  av_log(avctx, AV_LOG_WARNING,
837  "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
838  refw, refh, w, h);
839  s->mvscale[i][0] = s->mvscale[i][1] = REF_INVALID_SCALE;
840  continue;
841  }
842  s->mvscale[i][0] = (refw << 14) / w;
843  s->mvscale[i][1] = (refh << 14) / h;
844  s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
845  s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
846  }
847  valid_ref_frame++;
848  }
849  if (!valid_ref_frame) {
850  av_log(avctx, AV_LOG_ERROR, "No valid reference frame is found, bitstream not supported\n");
851  return AVERROR_INVALIDDATA;
852  }
853  }
854 
855  if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) {
856  s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
857  s->prob_ctx[3].p = ff_vp9_default_probs;
858  memcpy(s->prob_ctx[0].coef, ff_vp9_default_coef_probs,
859  sizeof(ff_vp9_default_coef_probs));
860  memcpy(s->prob_ctx[1].coef, ff_vp9_default_coef_probs,
861  sizeof(ff_vp9_default_coef_probs));
862  memcpy(s->prob_ctx[2].coef, ff_vp9_default_coef_probs,
863  sizeof(ff_vp9_default_coef_probs));
864  memcpy(s->prob_ctx[3].coef, ff_vp9_default_coef_probs,
865  sizeof(ff_vp9_default_coef_probs));
866  } else if (s->s.h.intraonly && s->s.h.resetctx == 2) {
867  s->prob_ctx[c].p = ff_vp9_default_probs;
868  memcpy(s->prob_ctx[c].coef, ff_vp9_default_coef_probs,
869  sizeof(ff_vp9_default_coef_probs));
870  }
871 
872  // next 16 bits is size of the rest of the header (arith-coded)
873  s->s.h.compressed_header_size = size2 = get_bits(&s->gb, 16);
874  s->s.h.uncompressed_header_size = (get_bits_count(&s->gb) + 7) / 8;
875 
876  data2 = align_get_bits(&s->gb);
877  if (size2 > size - (data2 - data)) {
878  av_log(avctx, AV_LOG_ERROR, "Invalid compressed header size\n");
879  return AVERROR_INVALIDDATA;
880  }
881  ret = ff_vpx_init_range_decoder(&s->c, data2, size2);
882  if (ret < 0)
883  return ret;
884 
885  if (vpx_rac_get_prob_branchy(&s->c, 128)) { // marker bit
886  av_log(avctx, AV_LOG_ERROR, "Marker bit was set\n");
887  return AVERROR_INVALIDDATA;
888  }
889 
890  for (i = 0; i < s->active_tile_cols; i++) {
891  if (s->s.h.keyframe || s->s.h.intraonly) {
892  memset(s->td[i].counts.coef, 0, sizeof(s->td[0].counts.coef));
893  memset(s->td[i].counts.eob, 0, sizeof(s->td[0].counts.eob));
894  } else {
895  memset(&s->td[i].counts, 0, sizeof(s->td[0].counts));
896  }
897  s->td[i].nb_block_structure = 0;
898  }
899 
900  /* FIXME is it faster to not copy here, but do it down in the fw updates
901  * as explicit copies if the fw update is missing (and skip the copy upon
902  * fw update)? */
903  s->prob.p = s->prob_ctx[c].p;
904 
905  // txfm updates
906  if (s->s.h.lossless) {
907  s->s.h.txfmmode = TX_4X4;
908  } else {
909  s->s.h.txfmmode = vp89_rac_get_uint(&s->c, 2);
910  if (s->s.h.txfmmode == 3)
911  s->s.h.txfmmode += vp89_rac_get(&s->c);
912 
913  if (s->s.h.txfmmode == TX_SWITCHABLE) {
914  for (i = 0; i < 2; i++)
915  if (vpx_rac_get_prob_branchy(&s->c, 252))
916  s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
917  for (i = 0; i < 2; i++)
918  for (j = 0; j < 2; j++)
919  if (vpx_rac_get_prob_branchy(&s->c, 252))
920  s->prob.p.tx16p[i][j] =
921  update_prob(&s->c, s->prob.p.tx16p[i][j]);
922  for (i = 0; i < 2; i++)
923  for (j = 0; j < 3; j++)
924  if (vpx_rac_get_prob_branchy(&s->c, 252))
925  s->prob.p.tx32p[i][j] =
926  update_prob(&s->c, s->prob.p.tx32p[i][j]);
927  }
928  }
929 
930  // coef updates
931  for (i = 0; i < 4; i++) {
932  uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
933  if (vp89_rac_get(&s->c)) {
934  for (j = 0; j < 2; j++)
935  for (k = 0; k < 2; k++)
936  for (l = 0; l < 6; l++)
937  for (m = 0; m < 6; m++) {
938  uint8_t *p = s->prob.coef[i][j][k][l][m];
939  uint8_t *r = ref[j][k][l][m];
940  if (m >= 3 && l == 0) // dc only has 3 pt
941  break;
942  for (n = 0; n < 3; n++) {
943  if (vpx_rac_get_prob_branchy(&s->c, 252))
944  p[n] = update_prob(&s->c, r[n]);
945  else
946  p[n] = r[n];
947  }
948  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
949  }
950  } else {
951  for (j = 0; j < 2; j++)
952  for (k = 0; k < 2; k++)
953  for (l = 0; l < 6; l++)
954  for (m = 0; m < 6; m++) {
955  uint8_t *p = s->prob.coef[i][j][k][l][m];
956  uint8_t *r = ref[j][k][l][m];
957  if (m > 3 && l == 0) // dc only has 3 pt
958  break;
959  memcpy(p, r, 3);
960  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
961  }
962  }
963  if (s->s.h.txfmmode == i)
964  break;
965  }
966 
967  // mode updates
968  for (i = 0; i < 3; i++)
969  if (vpx_rac_get_prob_branchy(&s->c, 252))
970  s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
971  if (!s->s.h.keyframe && !s->s.h.intraonly) {
972  for (i = 0; i < 7; i++)
973  for (j = 0; j < 3; j++)
974  if (vpx_rac_get_prob_branchy(&s->c, 252))
975  s->prob.p.mv_mode[i][j] =
976  update_prob(&s->c, s->prob.p.mv_mode[i][j]);
977 
978  if (s->s.h.filtermode == FILTER_SWITCHABLE)
979  for (i = 0; i < 4; i++)
980  for (j = 0; j < 2; j++)
981  if (vpx_rac_get_prob_branchy(&s->c, 252))
982  s->prob.p.filter[i][j] =
983  update_prob(&s->c, s->prob.p.filter[i][j]);
984 
985  for (i = 0; i < 4; i++)
986  if (vpx_rac_get_prob_branchy(&s->c, 252))
987  s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);
988 
989  if (s->s.h.allowcompinter) {
990  s->s.h.comppredmode = vp89_rac_get(&s->c);
991  if (s->s.h.comppredmode)
992  s->s.h.comppredmode += vp89_rac_get(&s->c);
993  if (s->s.h.comppredmode == PRED_SWITCHABLE)
994  for (i = 0; i < 5; i++)
995  if (vpx_rac_get_prob_branchy(&s->c, 252))
996  s->prob.p.comp[i] =
997  update_prob(&s->c, s->prob.p.comp[i]);
998  } else {
999  s->s.h.comppredmode = PRED_SINGLEREF;
1000  }
1001 
1002  if (s->s.h.comppredmode != PRED_COMPREF) {
1003  for (i = 0; i < 5; i++) {
1004  if (vpx_rac_get_prob_branchy(&s->c, 252))
1005  s->prob.p.single_ref[i][0] =
1006  update_prob(&s->c, s->prob.p.single_ref[i][0]);
1007  if (vpx_rac_get_prob_branchy(&s->c, 252))
1008  s->prob.p.single_ref[i][1] =
1009  update_prob(&s->c, s->prob.p.single_ref[i][1]);
1010  }
1011  }
1012 
1013  if (s->s.h.comppredmode != PRED_SINGLEREF) {
1014  for (i = 0; i < 5; i++)
1015  if (vpx_rac_get_prob_branchy(&s->c, 252))
1016  s->prob.p.comp_ref[i] =
1017  update_prob(&s->c, s->prob.p.comp_ref[i]);
1018  }
1019 
1020  for (i = 0; i < 4; i++)
1021  for (j = 0; j < 9; j++)
1022  if (vpx_rac_get_prob_branchy(&s->c, 252))
1023  s->prob.p.y_mode[i][j] =
1024  update_prob(&s->c, s->prob.p.y_mode[i][j]);
1025 
1026  for (i = 0; i < 4; i++)
1027  for (j = 0; j < 4; j++)
1028  for (k = 0; k < 3; k++)
1029  if (vpx_rac_get_prob_branchy(&s->c, 252))
1030  s->prob.p.partition[3 - i][j][k] =
1031  update_prob(&s->c,
1032  s->prob.p.partition[3 - i][j][k]);
1033 
1034  // mv fields don't use the update_prob subexp model for some reason
1035  for (i = 0; i < 3; i++)
1036  if (vpx_rac_get_prob_branchy(&s->c, 252))
1037  s->prob.p.mv_joint[i] = (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1038 
1039  for (i = 0; i < 2; i++) {
1040  if (vpx_rac_get_prob_branchy(&s->c, 252))
1041  s->prob.p.mv_comp[i].sign =
1042  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1043 
1044  for (j = 0; j < 10; j++)
1045  if (vpx_rac_get_prob_branchy(&s->c, 252))
1046  s->prob.p.mv_comp[i].classes[j] =
1047  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1048 
1049  if (vpx_rac_get_prob_branchy(&s->c, 252))
1050  s->prob.p.mv_comp[i].class0 =
1051  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1052 
1053  for (j = 0; j < 10; j++)
1054  if (vpx_rac_get_prob_branchy(&s->c, 252))
1055  s->prob.p.mv_comp[i].bits[j] =
1056  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1057  }
1058 
1059  for (i = 0; i < 2; i++) {
1060  for (j = 0; j < 2; j++)
1061  for (k = 0; k < 3; k++)
1062  if (vpx_rac_get_prob_branchy(&s->c, 252))
1063  s->prob.p.mv_comp[i].class0_fp[j][k] =
1064  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1065 
1066  for (j = 0; j < 3; j++)
1067  if (vpx_rac_get_prob_branchy(&s->c, 252))
1068  s->prob.p.mv_comp[i].fp[j] =
1069  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1070  }
1071 
1072  if (s->s.h.highprecisionmvs) {
1073  for (i = 0; i < 2; i++) {
1074  if (vpx_rac_get_prob_branchy(&s->c, 252))
1075  s->prob.p.mv_comp[i].class0_hp =
1076  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1077 
1078  if (vpx_rac_get_prob_branchy(&s->c, 252))
1079  s->prob.p.mv_comp[i].hp =
1080  (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
1081  }
1082  }
1083  }
1084 
1085  return (data2 - data) + size2;
1086 }
1087 
/*
 * Recursively decode the partition tree of one 64x64 superblock and emit
 * its blocks via ff_vp9_decode_block(), recursing one level deeper on
 * PARTITION_SPLIT. The partition probability context c is derived from
 * the above/left partition contexts; yoff/uvoff are byte offsets into the
 * current frame's luma/chroma planes and hbs is half the current block
 * size in 8-pixel units.
 *
 * NOTE(review): this listing is a doc extraction; original source lines
 * 1103 and 1107 (which read `bp` from the range coder before it is used)
 * were dropped by the extractor. Verify against upstream vp9.c.
 */
1088 static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1089  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1090 {
1091  const VP9Context *s = td->s;
1092  int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
1093  (((td->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
/* keyframes/intra-only frames use the fixed default partition probabilities */
1094  const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? ff_vp9_default_kf_partition_probs[bl][c] :
1095  s->prob.p.partition[bl][c];
1096  enum BlockPartition bp;
1097  ptrdiff_t hbs = 4 >> bl;
1098  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1099  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1100  int bytesperpixel = s->bytesperpixel;
1101 
1102  if (bl == BL_8X8) {
/* NOTE(review): extractor dropped the statement assigning bp here */
1104  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1105  } else if (col + hbs < s->cols) { // FIXME why not <=?
1106  if (row + hbs < s->rows) { // FIXME why not <=?
/* NOTE(review): extractor dropped the statement assigning bp here */
1108  switch (bp) {
1109  case PARTITION_NONE:
1110  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1111  break;
1112  case PARTITION_H:
1113  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1114  yoff += hbs * 8 * y_stride;
1115  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1116  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
1117  break;
1118  case PARTITION_V:
1119  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1120  yoff += hbs * 8 * bytesperpixel;
1121  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1122  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
1123  break;
1124  case PARTITION_SPLIT:
1125  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1126  decode_sb(td, row, col + hbs, lflvl,
1127  yoff + 8 * hbs * bytesperpixel,
1128  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1129  yoff += hbs * 8 * y_stride;
1130  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1131  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1132  decode_sb(td, row + hbs, col + hbs, lflvl,
1133  yoff + 8 * hbs * bytesperpixel,
1134  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1135  break;
1136  default:
1137  av_assert0(0);
1138  }
/* right edge of frame: only H (both halves fit) or SPLIT (left column only)
 * are possible, decided by a single probability read */
1139  } else if (vpx_rac_get_prob_branchy(td->c, p[1])) {
1140  bp = PARTITION_SPLIT;
1141  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1142  decode_sb(td, row, col + hbs, lflvl,
1143  yoff + 8 * hbs * bytesperpixel,
1144  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1145  } else {
1146  bp = PARTITION_H;
1147  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1148  }
/* bottom edge of frame: V or SPLIT (top row only) */
1149  } else if (row + hbs < s->rows) { // FIXME why not <=?
1150  if (vpx_rac_get_prob_branchy(td->c, p[2])) {
1151  bp = PARTITION_SPLIT;
1152  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1153  yoff += hbs * 8 * y_stride;
1154  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1155  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1156  } else {
1157  bp = PARTITION_V;
1158  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1159  }
/* bottom-right corner: split is the only legal choice, nothing to read */
1160  } else {
1161  bp = PARTITION_SPLIT;
1162  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1163  }
/* record the decoded partition in the per-tile statistics */
1164  td->counts.partition[bl][c][bp]++;
1165 }
1166 
/*
 * Pass-2 counterpart of decode_sb(): instead of reading the partition
 * tree from the bitstream, replay the block layout recorded in td->b
 * (b->bl / b->bp) during pass 1, descending until the stored block level
 * is reached. The yoff/uvoff arithmetic mirrors decode_sb().
 */
1167 static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1168  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1169 {
1170  const VP9Context *s = td->s;
1171  VP9Block *b = td->b;
1172  ptrdiff_t hbs = 4 >> bl;
1173  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1174  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1175  int bytesperpixel = s->bytesperpixel;
1176 
1177  if (bl == BL_8X8) {
1178  av_assert2(b->bl == BL_8X8);
1179  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
/* stored block is exactly at this level: emit it, plus its H/V sibling
 * when the second half lies inside the frame */
1180  } else if (td->b->bl == bl) {
1181  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
1182  if (b->bp == PARTITION_H && row + hbs < s->rows) {
1183  yoff += hbs * 8 * y_stride;
1184  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1185  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
1186  } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
1187  yoff += hbs * 8 * bytesperpixel;
1188  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1189  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
1190  }
/* stored block is smaller: recurse into whichever quadrants exist inside
 * the frame bounds */
1191  } else {
1192  decode_sb_mem(td, row, col, lflvl, yoff, uvoff, bl + 1);
1193  if (col + hbs < s->cols) { // FIXME why not <=?
1194  if (row + hbs < s->rows) {
1195  decode_sb_mem(td, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
1196  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1197  yoff += hbs * 8 * y_stride;
1198  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1199  decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1200  decode_sb_mem(td, row + hbs, col + hbs, lflvl,
1201  yoff + 8 * hbs * bytesperpixel,
1202  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1203  } else {
1204  yoff += hbs * 8 * bytesperpixel;
1205  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1206  decode_sb_mem(td, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
1207  }
1208  } else if (row + hbs < s->rows) {
1209  yoff += hbs * 8 * y_stride;
1210  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1211  decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1212  }
1213  }
1214 }
1215 
/**
 * Compute the range covered by tile number idx when a dimension of n
 * superblocks is split into 2^log2_n tiles.
 *
 * @param start  receives the tile's first index (inclusive)
 * @param end    receives the tile's end index (exclusive)
 * @param idx    tile index within the dimension
 * @param log2_n log2 of the tile count
 * @param n      number of superblocks in this dimension
 *
 * Both bounds are clamped to n superblocks and scaled by 8 so they match
 * the row/col units used by the tile decoding loops.
 */
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
{
    int first_sb = (idx * n) >> log2_n;
    int next_sb  = ((idx + 1) * n) >> log2_n;

    if (first_sb > n)
        first_sb = n;
    if (next_sb > n)
        next_sb = n;

    *start = first_sb << 3;
    *end   = next_sb << 3;
}
1223 
1225 {
1226  int i;
1227 
1228  av_freep(&s->intra_pred_data[0]);
1229  for (i = 0; i < s->active_tile_cols; i++)
1230  vp9_tile_data_free(&s->td[i]);
1231 }
1232 
1234 {
1235  VP9Context *s = avctx->priv_data;
1236  int i;
1237 
1238  for (i = 0; i < 3; i++) {
1239  vp9_frame_unref(&s->s.frames[i]);
1240  av_frame_free(&s->s.frames[i].tf.f);
1241  }
1242  ff_refstruct_pool_uninit(&s->frame_extradata_pool);
1243  for (i = 0; i < 8; i++) {
1244  ff_thread_release_ext_buffer(&s->s.refs[i]);
1245  av_frame_free(&s->s.refs[i].f);
1246  ff_thread_release_ext_buffer(&s->next_refs[i]);
1247  av_frame_free(&s->next_refs[i].f);
1248  }
1249 
1250  free_buffers(s);
1251 #if HAVE_THREADS
1252  av_freep(&s->entries);
1253  ff_pthread_free(s, vp9_context_offsets);
1254 #endif
1255  av_freep(&s->td);
1256  return 0;
1257 }
1258 
/*
 * Decode all tiles of one frame without slice threading. For each tile
 * row, the per-tile compressed sizes are parsed first (32-bit big-endian
 * prefix per tile, last tile implicit) and one range decoder per tile
 * column is initialized; then superblock rows are decoded across the
 * tile columns, and per completed row the pre-loopfilter pixels are
 * backed up for intra prediction and the in-loop filter is run.
 * Returns 0 on success or a negative AVERROR code.
 */
1259 static int decode_tiles(AVCodecContext *avctx,
1260  const uint8_t *data, int size)
1261 {
1262  VP9Context *s = avctx->priv_data;
1263  VP9TileData *td = &s->td[0];
1264  int row, col, tile_row, tile_col, ret;
1265  int bytesperpixel;
1266  int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
1267  AVFrame *f;
1268  ptrdiff_t yoff, uvoff, ls_y, ls_uv;
1269 
1270  f = s->s.frames[CUR_FRAME].tf.f;
1271  ls_y = f->linesize[0];
1272  ls_uv =f->linesize[1];
1273  bytesperpixel = s->bytesperpixel;
1274 
1275  yoff = uvoff = 0;
1276  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1277  set_tile_offset(&tile_row_start, &tile_row_end,
1278  tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);
1279 
1280  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
/* int64_t so the size-prefix comparison below cannot overflow */
1281  int64_t tile_size;
1282 
/* the very last tile has no size prefix: it spans the rest of the packet */
1283  if (tile_col == s->s.h.tiling.tile_cols - 1 &&
1284  tile_row == s->s.h.tiling.tile_rows - 1) {
1285  tile_size = size;
1286  } else {
1287  tile_size = AV_RB32(data);
1288  data += 4;
1289  size -= 4;
1290  }
1291  if (tile_size > size)
1292  return AVERROR_INVALIDDATA;
1293  ret = ff_vpx_init_range_decoder(&td->c_b[tile_col], data, tile_size);
1294  if (ret < 0)
1295  return ret;
1296  if (vpx_rac_get_prob_branchy(&td->c_b[tile_col], 128)) // marker bit
1297  return AVERROR_INVALIDDATA;
1298  data += tile_size;
1299  size -= tile_size;
1300  }
1301 
1302  for (row = tile_row_start; row < tile_row_end;
1303  row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
1304  VP9Filter *lflvl_ptr = s->lflvl;
1305  ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
1306 
1307  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1308  set_tile_offset(&tile_col_start, &tile_col_end,
1309  tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols);
1310  td->tile_col_start = tile_col_start;
/* pass 2 replays pass-1 decisions, so the left-edge contexts and the
 * range decoder are only (re)initialized when not in pass 2 */
1311  if (s->pass != 2) {
1312  memset(td->left_partition_ctx, 0, 8);
1313  memset(td->left_skip_ctx, 0, 8);
1314  if (s->s.h.keyframe || s->s.h.intraonly) {
1315  memset(td->left_mode_ctx, DC_PRED, 16);
1316  } else {
1317  memset(td->left_mode_ctx, NEARESTMV, 8);
1318  }
1319  memset(td->left_y_nnz_ctx, 0, 16);
1320  memset(td->left_uv_nnz_ctx, 0, 32);
1321  memset(td->left_segpred_ctx, 0, 8);
1322 
1323  td->c = &td->c_b[tile_col];
1324  }
1325 
1326  for (col = tile_col_start;
1327  col < tile_col_end;
1328  col += 8, yoff2 += 64 * bytesperpixel,
1329  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1330  // FIXME integrate with lf code (i.e. zero after each
1331  // use, similar to invtxfm coefficients, or similar)
1332  if (s->pass != 1) {
1333  memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
1334  }
1335 
1336  if (s->pass == 2) {
1337  decode_sb_mem(td, row, col, lflvl_ptr,
1338  yoff2, uvoff2, BL_64X64);
1339  } else {
/* running out of range-coder input mid-frame means truncated data */
1340  if (vpx_rac_is_end(td->c)) {
1341  return AVERROR_INVALIDDATA;
1342  }
1343  decode_sb(td, row, col, lflvl_ptr,
1344  yoff2, uvoff2, BL_64X64);
1345  }
1346  }
1347  }
1348 
/* in pass 1 the reconstruction backup and loopfilter below are skipped */
1349  if (s->pass == 1)
1350  continue;
1351 
1352  // backup pre-loopfilter reconstruction data for intra
1353  // prediction of next row of sb64s
1354  if (row + 8 < s->rows) {
1355  memcpy(s->intra_pred_data[0],
1356  f->data[0] + yoff + 63 * ls_y,
1357  8 * s->cols * bytesperpixel);
1358  memcpy(s->intra_pred_data[1],
1359  f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1360  8 * s->cols * bytesperpixel >> s->ss_h);
1361  memcpy(s->intra_pred_data[2],
1362  f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1363  8 * s->cols * bytesperpixel >> s->ss_h);
1364  }
1365 
1366  // loopfilter one row
1367  if (s->s.h.filter.level) {
1368  yoff2 = yoff;
1369  uvoff2 = uvoff;
1370  lflvl_ptr = s->lflvl;
1371  for (col = 0; col < s->cols;
1372  col += 8, yoff2 += 64 * bytesperpixel,
1373  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1374  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, row, col,
1375  yoff2, uvoff2);
1376  }
1377  }
1378 
1379  // FIXME maybe we can make this more finegrained by running the
1380  // loopfilter per-block instead of after each sbrow
1381  // In fact that would also make intra pred left preparation easier?
1382  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, row >> 3, 0);
1383  }
1384  }
1385  return 0;
1386 }
1387 
1388 #if HAVE_THREADS
/*
 * Slice-threading worker: decodes one tile column (selected by jobnr)
 * across all tile rows. Mirrors the inner loops of decode_tiles(), but
 * backs up intra-prediction pixels only for this tile column's span and
 * reports per-superblock-row progress so loopfilter_proc() can follow.
 * Note: tdata and threadnr are unused; the tile column comes from jobnr.
 */
1389 static av_always_inline
1390 int decode_tiles_mt(AVCodecContext *avctx, void *tdata, int jobnr,
1391  int threadnr)
1392 {
1393  VP9Context *s = avctx->priv_data;
1394  VP9TileData *td = &s->td[jobnr];
1395  ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1396  int bytesperpixel = s->bytesperpixel, row, col, tile_row;
1397  unsigned tile_cols_len;
1398  int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
1399  VP9Filter *lflvl_ptr_base;
1400  AVFrame *f;
1401 
1402  f = s->s.frames[CUR_FRAME].tf.f;
1403  ls_y = f->linesize[0];
1404  ls_uv =f->linesize[1];
1405 
/* offsets of this tile column's left edge within the frame planes */
1406  set_tile_offset(&tile_col_start, &tile_col_end,
1407  jobnr, s->s.h.tiling.log2_tile_cols, s->sb_cols);
1408  td->tile_col_start = tile_col_start;
1409  uvoff = (64 * bytesperpixel >> s->ss_h)*(tile_col_start >> 3);
1410  yoff = (64 * bytesperpixel)*(tile_col_start >> 3);
1411  lflvl_ptr_base = s->lflvl+(tile_col_start >> 3);
1412 
1413  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1414  set_tile_offset(&tile_row_start, &tile_row_end,
1415  tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);
1416 
/* each tile row of this column has its own pre-initialized range coder */
1417  td->c = &td->c_b[tile_row];
1418  for (row = tile_row_start; row < tile_row_end;
1419  row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
1420  ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
1421  VP9Filter *lflvl_ptr = lflvl_ptr_base+s->sb_cols*(row >> 3);
1422 
/* reset the left-edge prediction contexts at the start of every sb row */
1423  memset(td->left_partition_ctx, 0, 8);
1424  memset(td->left_skip_ctx, 0, 8);
1425  if (s->s.h.keyframe || s->s.h.intraonly) {
1426  memset(td->left_mode_ctx, DC_PRED, 16);
1427  } else {
1428  memset(td->left_mode_ctx, NEARESTMV, 8);
1429  }
1430  memset(td->left_y_nnz_ctx, 0, 16);
1431  memset(td->left_uv_nnz_ctx, 0, 32);
1432  memset(td->left_segpred_ctx, 0, 8);
1433 
1434  for (col = tile_col_start;
1435  col < tile_col_end;
1436  col += 8, yoff2 += 64 * bytesperpixel,
1437  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1438  // FIXME integrate with lf code (i.e. zero after each
1439  // use, similar to invtxfm coefficients, or similar)
1440  memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
1441  decode_sb(td, row, col, lflvl_ptr,
1442  yoff2, uvoff2, BL_64X64);
1443  }
1444 
1445  // backup pre-loopfilter reconstruction data for intra
1446  // prediction of next row of sb64s
1447  tile_cols_len = tile_col_end - tile_col_start;
1448  if (row + 8 < s->rows) {
1449  memcpy(s->intra_pred_data[0] + (tile_col_start * 8 * bytesperpixel),
1450  f->data[0] + yoff + 63 * ls_y,
1451  8 * tile_cols_len * bytesperpixel);
1452  memcpy(s->intra_pred_data[1] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
1453  f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1454  8 * tile_cols_len * bytesperpixel >> s->ss_h);
1455  memcpy(s->intra_pred_data[2] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
1456  f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1457  8 * tile_cols_len * bytesperpixel >> s->ss_h);
1458  }
1459 
/* tell the loopfilter thread this sb row of our tile column is done */
1460  vp9_report_tile_progress(s, row >> 3, 1);
1461  }
1462  }
1463  return 0;
1464 }
1465 
1466 static av_always_inline
1467 int loopfilter_proc(AVCodecContext *avctx)
1468 {
1469  VP9Context *s = avctx->priv_data;
1470  ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1471  VP9Filter *lflvl_ptr;
1472  int bytesperpixel = s->bytesperpixel, col, i;
1473  AVFrame *f;
1474 
1475  f = s->s.frames[CUR_FRAME].tf.f;
1476  ls_y = f->linesize[0];
1477  ls_uv =f->linesize[1];
1478 
1479  for (i = 0; i < s->sb_rows; i++) {
1480  vp9_await_tile_progress(s, i, s->s.h.tiling.tile_cols);
1481 
1482  if (s->s.h.filter.level) {
1483  yoff = (ls_y * 64)*i;
1484  uvoff = (ls_uv * 64 >> s->ss_v)*i;
1485  lflvl_ptr = s->lflvl+s->sb_cols*i;
1486  for (col = 0; col < s->cols;
1487  col += 8, yoff += 64 * bytesperpixel,
1488  uvoff += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1489  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, i << 3, col,
1490  yoff, uvoff);
1491  }
1492  }
1493  }
1494  return 0;
1495 }
1496 #endif
1497 
1499 {
1500  AVVideoEncParams *par;
1501  unsigned int tile, nb_blocks = 0;
1502 
1503  if (s->s.h.segmentation.enabled) {
1504  for (tile = 0; tile < s->active_tile_cols; tile++)
1505  nb_blocks += s->td[tile].nb_block_structure;
1506  }
1507 
1509  AV_VIDEO_ENC_PARAMS_VP9, nb_blocks);
1510  if (!par)
1511  return AVERROR(ENOMEM);
1512 
1513  par->qp = s->s.h.yac_qi;
1514  par->delta_qp[0][0] = s->s.h.ydc_qdelta;
1515  par->delta_qp[1][0] = s->s.h.uvdc_qdelta;
1516  par->delta_qp[2][0] = s->s.h.uvdc_qdelta;
1517  par->delta_qp[1][1] = s->s.h.uvac_qdelta;
1518  par->delta_qp[2][1] = s->s.h.uvac_qdelta;
1519 
1520  if (nb_blocks) {
1521  unsigned int block = 0;
1522  unsigned int tile, block_tile;
1523 
1524  for (tile = 0; tile < s->active_tile_cols; tile++) {
1525  VP9TileData *td = &s->td[tile];
1526 
1527  for (block_tile = 0; block_tile < td->nb_block_structure; block_tile++) {
1529  unsigned int row = td->block_structure[block_tile].row;
1530  unsigned int col = td->block_structure[block_tile].col;
1531  uint8_t seg_id = frame->segmentation_map[row * 8 * s->sb_cols + col];
1532 
1533  b->src_x = col * 8;
1534  b->src_y = row * 8;
1535  b->w = 1 << (3 + td->block_structure[block_tile].block_size_idx_x);
1536  b->h = 1 << (3 + td->block_structure[block_tile].block_size_idx_y);
1537 
1538  if (s->s.h.segmentation.feat[seg_id].q_enabled) {
1539  b->delta_qp = s->s.h.segmentation.feat[seg_id].q_val;
1540  if (s->s.h.segmentation.absolute_vals)
1541  b->delta_qp -= par->qp;
1542  }
1543  }
1544  }
1545  }
1546 
1547  return 0;
1548 }
1549 
1551  int *got_frame, AVPacket *pkt)
1552 {
1553  const uint8_t *data = pkt->data;
1554  int size = pkt->size;
1555  VP9Context *s = avctx->priv_data;
1556  int ret, i, j, ref;
1557  int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map &&
1558  (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map);
1559  AVFrame *f;
1560 
1561  if ((ret = decode_frame_header(avctx, data, size, &ref)) < 0) {
1562  return ret;
1563  } else if (ret == 0) {
1564  if (!s->s.refs[ref].f->buf[0]) {
1565  av_log(avctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
1566  return AVERROR_INVALIDDATA;
1567  }
1568  if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
1569  return ret;
1570  frame->pts = pkt->pts;
1571  frame->pkt_dts = pkt->dts;
1572  for (i = 0; i < 8; i++) {
1573  if (s->next_refs[i].f->buf[0])
1574  ff_thread_release_ext_buffer(&s->next_refs[i]);
1575  if (s->s.refs[i].f->buf[0] &&
1576  (ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i])) < 0)
1577  return ret;
1578  }
1579  *got_frame = 1;
1580  return pkt->size;
1581  }
1582  data += ret;
1583  size -= ret;
1584 
1585  if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly) {
1586  if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0])
1587  vp9_frame_unref(&s->s.frames[REF_FRAME_SEGMAP]);
1588  if (!s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
1589  (ret = vp9_frame_ref(&s->s.frames[REF_FRAME_SEGMAP], &s->s.frames[CUR_FRAME])) < 0)
1590  return ret;
1591  }
1592  if (s->s.frames[REF_FRAME_MVPAIR].tf.f->buf[0])
1593  vp9_frame_unref(&s->s.frames[REF_FRAME_MVPAIR]);
1594  if (!s->s.h.intraonly && !s->s.h.keyframe && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
1595  (ret = vp9_frame_ref(&s->s.frames[REF_FRAME_MVPAIR], &s->s.frames[CUR_FRAME])) < 0)
1596  return ret;
1597  if (s->s.frames[CUR_FRAME].tf.f->buf[0])
1598  vp9_frame_unref(&s->s.frames[CUR_FRAME]);
1599  if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0)
1600  return ret;
1601  f = s->s.frames[CUR_FRAME].tf.f;
1602  if (s->s.h.keyframe)
1603  f->flags |= AV_FRAME_FLAG_KEY;
1604  else
1605  f->flags &= ~AV_FRAME_FLAG_KEY;
1606  f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1607 
1608  if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0] &&
1609  (s->s.frames[REF_FRAME_MVPAIR].tf.f->width != s->s.frames[CUR_FRAME].tf.f->width ||
1610  s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
1611  vp9_frame_unref(&s->s.frames[REF_FRAME_SEGMAP]);
1612  }
1613 
1614  // ref frame setup
1615  for (i = 0; i < 8; i++) {
1616  if (s->next_refs[i].f->buf[0])
1617  ff_thread_release_ext_buffer(&s->next_refs[i]);
1618  if (s->s.h.refreshrefmask & (1 << i)) {
1619  ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.frames[CUR_FRAME].tf);
1620  } else if (s->s.refs[i].f->buf[0]) {
1621  ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i]);
1622  }
1623  if (ret < 0)
1624  return ret;
1625  }
1626 
1627  if (avctx->hwaccel) {
1628  const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel);
1629  ret = hwaccel->start_frame(avctx, NULL, 0);
1630  if (ret < 0)
1631  return ret;
1632  ret = hwaccel->decode_slice(avctx, pkt->data, pkt->size);
1633  if (ret < 0)
1634  return ret;
1635  ret = hwaccel->end_frame(avctx);
1636  if (ret < 0)
1637  return ret;
1638  goto finish;
1639  }
1640 
1641  // main tile decode loop
1642  memset(s->above_partition_ctx, 0, s->cols);
1643  memset(s->above_skip_ctx, 0, s->cols);
1644  if (s->s.h.keyframe || s->s.h.intraonly) {
1645  memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
1646  } else {
1647  memset(s->above_mode_ctx, NEARESTMV, s->cols);
1648  }
1649  memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
1650  memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h);
1651  memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h);
1652  memset(s->above_segpred_ctx, 0, s->cols);
1653  s->pass = s->s.frames[CUR_FRAME].uses_2pass =
1654  avctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode;
1655  if ((ret = update_block_buffers(avctx)) < 0) {
1656  av_log(avctx, AV_LOG_ERROR,
1657  "Failed to allocate block buffers\n");
1658  return ret;
1659  }
1660  if (s->s.h.refreshctx && s->s.h.parallelmode) {
1661  int j, k, l, m;
1662 
1663  for (i = 0; i < 4; i++) {
1664  for (j = 0; j < 2; j++)
1665  for (k = 0; k < 2; k++)
1666  for (l = 0; l < 6; l++)
1667  for (m = 0; m < 6; m++)
1668  memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m],
1669  s->prob.coef[i][j][k][l][m], 3);
1670  if (s->s.h.txfmmode == i)
1671  break;
1672  }
1673  s->prob_ctx[s->s.h.framectxid].p = s->prob.p;
1674  ff_thread_finish_setup(avctx);
1675  } else if (!s->s.h.refreshctx) {
1676  ff_thread_finish_setup(avctx);
1677  }
1678 
1679 #if HAVE_THREADS
1680  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1681  for (i = 0; i < s->sb_rows; i++)
1682  atomic_store(&s->entries[i], 0);
1683  }
1684 #endif
1685 
1686  do {
1687  for (i = 0; i < s->active_tile_cols; i++) {
1688  s->td[i].b = s->td[i].b_base;
1689  s->td[i].block = s->td[i].block_base;
1690  s->td[i].uvblock[0] = s->td[i].uvblock_base[0];
1691  s->td[i].uvblock[1] = s->td[i].uvblock_base[1];
1692  s->td[i].eob = s->td[i].eob_base;
1693  s->td[i].uveob[0] = s->td[i].uveob_base[0];
1694  s->td[i].uveob[1] = s->td[i].uveob_base[1];
1695  s->td[i].error_info = 0;
1696  }
1697 
1698 #if HAVE_THREADS
1699  if (avctx->active_thread_type == FF_THREAD_SLICE) {
1700  int tile_row, tile_col;
1701 
1702  av_assert1(!s->pass);
1703 
1704  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1705  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1706  int64_t tile_size;
1707 
1708  if (tile_col == s->s.h.tiling.tile_cols - 1 &&
1709  tile_row == s->s.h.tiling.tile_rows - 1) {
1710  tile_size = size;
1711  } else {
1712  tile_size = AV_RB32(data);
1713  data += 4;
1714  size -= 4;
1715  }
1716  if (tile_size > size)
1717  return AVERROR_INVALIDDATA;
1718  ret = ff_vpx_init_range_decoder(&s->td[tile_col].c_b[tile_row], data, tile_size);
1719  if (ret < 0)
1720  return ret;
1721  if (vpx_rac_get_prob_branchy(&s->td[tile_col].c_b[tile_row], 128)) // marker bit
1722  return AVERROR_INVALIDDATA;
1723  data += tile_size;
1724  size -= tile_size;
1725  }
1726  }
1727 
1728  ff_slice_thread_execute_with_mainfunc(avctx, decode_tiles_mt, loopfilter_proc, s->td, NULL, s->s.h.tiling.tile_cols);
1729  } else
1730 #endif
1731  {
1732  ret = decode_tiles(avctx, data, size);
1733  if (ret < 0) {
1734  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
1735  return ret;
1736  }
1737  }
1738 
1739  // Sum all counts fields into td[0].counts for tile threading
1740  if (avctx->active_thread_type == FF_THREAD_SLICE)
1741  for (i = 1; i < s->s.h.tiling.tile_cols; i++)
1742  for (j = 0; j < sizeof(s->td[i].counts) / sizeof(unsigned); j++)
1743  ((unsigned *)&s->td[0].counts)[j] += ((unsigned *)&s->td[i].counts)[j];
1744 
1745  if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) {
1747  ff_thread_finish_setup(avctx);
1748  }
1749  } while (s->pass++ == 1);
1750  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
1751 
1752  if (s->td->error_info < 0) {
1753  av_log(avctx, AV_LOG_ERROR, "Failed to decode tile data\n");
1754  s->td->error_info = 0;
1755  return AVERROR_INVALIDDATA;
1756  }
1758  ret = vp9_export_enc_params(s, &s->s.frames[CUR_FRAME]);
1759  if (ret < 0)
1760  return ret;
1761  }
1762 
1763 finish:
1764  // ref frame setup
1765  for (i = 0; i < 8; i++) {
1766  if (s->s.refs[i].f->buf[0])
1767  ff_thread_release_ext_buffer(&s->s.refs[i]);
1768  if (s->next_refs[i].f->buf[0] &&
1769  (ret = ff_thread_ref_frame(&s->s.refs[i], &s->next_refs[i])) < 0)
1770  return ret;
1771  }
1772 
1773  if (!s->s.h.invisible) {
1774  if ((ret = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0)
1775  return ret;
1776  *got_frame = 1;
1777  }
1778 
1779  return pkt->size;
1780 }
1781 
1783 {
1784  VP9Context *s = avctx->priv_data;
1785  int i;
1786 
1787  for (i = 0; i < 3; i++)
1788  vp9_frame_unref(&s->s.frames[i]);
1789  for (i = 0; i < 8; i++)
1790  ff_thread_release_ext_buffer(&s->s.refs[i]);
1791 
1792  if (FF_HW_HAS_CB(avctx, flush))
1793  FF_HW_SIMPLE_CALL(avctx, flush);
1794 }
1795 
1797 {
1798  VP9Context *s = avctx->priv_data;
1799  int ret;
1800 
1801  s->last_bpp = 0;
1802  s->s.h.filter.sharpness = -1;
1803 
1804 #if HAVE_THREADS
1805  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1806  ret = ff_pthread_init(s, vp9_context_offsets);
1807  if (ret < 0)
1808  return ret;
1809  }
1810 #endif
1811 
1812  for (int i = 0; i < 3; i++) {
1813  s->s.frames[i].tf.f = av_frame_alloc();
1814  if (!s->s.frames[i].tf.f)
1815  return AVERROR(ENOMEM);
1816  }
1817  for (int i = 0; i < 8; i++) {
1818  s->s.refs[i].f = av_frame_alloc();
1819  s->next_refs[i].f = av_frame_alloc();
1820  if (!s->s.refs[i].f || !s->next_refs[i].f)
1821  return AVERROR(ENOMEM);
1822  }
1823  return 0;
1824 }
1825 
1826 #if HAVE_THREADS
/*
 * Frame-threading context transfer (dst <- src): take new references to
 * the source decoder's internal frames and reference-frame slots, share
 * the extradata pool, and copy the header fields and probability state
 * the destination thread needs to start decoding the next frame.
 * Returns 0 on success or a negative AVERROR code from the ref helpers.
 */
1827 static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1828 {
1829  int i, ret;
1830  VP9Context *s = dst->priv_data, *ssrc = src->priv_data;
1831 
/* re-reference the three internal frames (cur / mvpair / segmap) */
1832  for (i = 0; i < 3; i++) {
1833  if (s->s.frames[i].tf.f->buf[0])
1834  vp9_frame_unref(&s->s.frames[i]);
1835  if (ssrc->s.frames[i].tf.f->buf[0]) {
1836  if ((ret = vp9_frame_ref(&s->s.frames[i], &ssrc->s.frames[i])) < 0)
1837  return ret;
1838  }
1839  }
/* dst's reference slots are taken from the source's *next* refs, i.e. the
 * state after the source frame finished its reference update */
1840  for (i = 0; i < 8; i++) {
1841  if (s->s.refs[i].f->buf[0])
1842  ff_thread_release_ext_buffer(&s->s.refs[i]);
1843  if (ssrc->next_refs[i].f->buf[0]) {
1844  if ((ret = ff_thread_ref_frame(&s->s.refs[i], &ssrc->next_refs[i])) < 0)
1845  return ret;
1846  }
1847  }
1848  ff_refstruct_replace(&s->frame_extradata_pool, ssrc->frame_extradata_pool);
1849  s->frame_extradata_pool_size = ssrc->frame_extradata_pool_size;
1850 
/* plain-value header/state copies */
1851  s->s.h.invisible = ssrc->s.h.invisible;
1852  s->s.h.keyframe = ssrc->s.h.keyframe;
1853  s->s.h.intraonly = ssrc->s.h.intraonly;
1854  s->ss_v = ssrc->ss_v;
1855  s->ss_h = ssrc->ss_h;
1856  s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
1857  s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
1858  s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
1859  s->bytesperpixel = ssrc->bytesperpixel;
1860  s->gf_fmt = ssrc->gf_fmt;
1861  s->w = ssrc->w;
1862  s->h = ssrc->h;
1863  s->s.h.bpp = ssrc->s.h.bpp;
1864  s->bpp_index = ssrc->bpp_index;
1865  s->pix_fmt = ssrc->pix_fmt;
1866  memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
1867  memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta));
1868  memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
1869  sizeof(s->s.h.segmentation.feat));
1870 
1871  return 0;
1872 }
1873 #endif
1874 
1876  .p.name = "vp9",
1877  CODEC_LONG_NAME("Google VP9"),
1878  .p.type = AVMEDIA_TYPE_VIDEO,
1879  .p.id = AV_CODEC_ID_VP9,
1880  .priv_data_size = sizeof(VP9Context),
1881  .init = vp9_decode_init,
1882  .close = vp9_decode_free,
1885  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
1888  .flush = vp9_decode_flush,
1889  UPDATE_THREAD_CONTEXT(vp9_decode_update_thread_context),
1890  .p.profiles = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
1891  .bsfs = "vp9_superframe_split",
1892  .hw_configs = (const AVCodecHWConfigInternal *const []) {
1893 #if CONFIG_VP9_DXVA2_HWACCEL
1894  HWACCEL_DXVA2(vp9),
1895 #endif
1896 #if CONFIG_VP9_D3D11VA_HWACCEL
1897  HWACCEL_D3D11VA(vp9),
1898 #endif
1899 #if CONFIG_VP9_D3D11VA2_HWACCEL
1900  HWACCEL_D3D11VA2(vp9),
1901 #endif
1902 #if CONFIG_VP9_NVDEC_HWACCEL
1903  HWACCEL_NVDEC(vp9),
1904 #endif
1905 #if CONFIG_VP9_VAAPI_HWACCEL
1906  HWACCEL_VAAPI(vp9),
1907 #endif
1908 #if CONFIG_VP9_VDPAU_HWACCEL
1909  HWACCEL_VDPAU(vp9),
1910 #endif
1911 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
1912  HWACCEL_VIDEOTOOLBOX(vp9),
1913 #endif
1914  NULL
1915  },
1916 };
AVVideoEncParams::qp
int32_t qp
Base quantisation parameter for the frame.
Definition: video_enc_params.h:103
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1435
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:253
FF_CODEC_CAP_SLICE_THREAD_HAS_MF
#define FF_CODEC_CAP_SLICE_THREAD_HAS_MF
Codec initializes slice-based threading with a main function.
Definition: codec_internal.h:64
decode_tiles
static int decode_tiles(AVCodecContext *avctx, const uint8_t *data, int size)
Definition: vp9.c:1259
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
vp9_frame_alloc
static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:108
atomic_store
#define atomic_store(object, desired)
Definition: stdatomic.h:85
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
ff_refstruct_ref
void * ff_refstruct_ref(void *obj)
Create a new reference to an object managed via this API, i.e.
Definition: refstruct.c:140
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
PRED_SWITCHABLE
@ PRED_SWITCHABLE
Definition: vp9shared.h:52
PRED_SINGLEREF
@ PRED_SINGLEREF
Definition: vp9shared.h:50
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1029
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1264
ff_refstruct_pool_alloc
FFRefStructPool * ff_refstruct_pool_alloc(size_t size, unsigned flags)
Equivalent to ff_refstruct_pool_alloc(size, flags, NULL, NULL, NULL, NULL, NULL)
Definition: refstruct.c:335
VP9Frame::segmentation_map
uint8_t * segmentation_map
Definition: vp9shared.h:68
VP9Frame
Definition: vp9shared.h:65
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:120
ff_vp9_decoder
const FFCodec ff_vp9_decoder
Definition: vp9.c:1875
decode_sb
static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1088
ff_vp9_adapt_probs
void ff_vp9_adapt_probs(VP9Context *s)
Definition: vp9prob.c:44
vp9_decode_flush
static void vp9_decode_flush(AVCodecContext *avctx)
Definition: vp9.c:1782
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:266
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:100
PRED_COMPREF
@ PRED_COMPREF
Definition: vp9shared.h:51
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:452
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:64
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:673
BlockPartition
BlockPartition
Definition: vp9shared.h:35
AVPacket::data
uint8_t * data
Definition: packet.h:491
DC_PRED
@ DC_PRED
Definition: vp9.h:48
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:66
b
#define b
Definition: input.c:41
data
const char data[16]
Definition: mxf.c:148
update_size
static int update_size(AVCodecContext *avctx, int w, int h)
Definition: vp9.c:169
decode_sb_mem
static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1167
REF_FRAME_SEGMAP
#define REF_FRAME_SEGMAP
Definition: vp9shared.h:170
decode_frame_header
static int decode_frame_header(AVCodecContext *avctx, const uint8_t *data, int size, int *ref)
Definition: vp9.c:503
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:468
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:247
FFCodec
Definition: codec_internal.h:127
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Definition: pixfmt.h:600
FF_HW_SIMPLE_CALL
#define FF_HW_SIMPLE_CALL(avctx, function)
Definition: hwaccel_internal.h:174
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
max
#define max(a, b)
Definition: cuda_runtime.h:33
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
VP9_SYNCCODE
#define VP9_SYNCCODE
Definition: vp9.c:48
vp89_rac.h
VP9Filter
Definition: vp9dec.h:78
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
VPXRangeCoder
Definition: vpx_rac.h:35
thread.h
ff_pthread_free
av_cold void ff_pthread_free(void *obj, const unsigned offsets[])
Definition: pthread.c:91
FILTER_SWITCHABLE
@ FILTER_SWITCHABLE
Definition: vp9.h:70
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
VP9Block
Definition: vp9dec.h:84
VP9Frame::tf
ThreadFrame tf
Definition: vp9shared.h:66
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:381
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:605
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
AVCOL_SPC_RESERVED
@ AVCOL_SPC_RESERVED
reserved for future use by ITU-T and ISO/IEC just like 15-255 are
Definition: pixfmt.h:603
TX_SWITCHABLE
@ TX_SWITCHABLE
Definition: vp9.h:33
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
finish
static void finish(void)
Definition: movenc.c:342
FFHWAccel
Definition: hwaccel_internal.h:34
ff_vp9_ac_qlookup
const int16_t ff_vp9_ac_qlookup[3][256]
Definition: vp9data.c:334
AVVideoEncParams::delta_qp
int32_t delta_qp[4][2]
Quantisation parameter offset from the base (per-frame) qp for a given plane (first index) and AC/DC ...
Definition: video_enc_params.h:109
fail
#define fail()
Definition: checkasm.h:142
ff_refstruct_pool_uninit
static void ff_refstruct_pool_uninit(FFRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:484
GetBitContext
Definition: get_bits.h:108
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:521
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:72
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
PARTITION_NONE
@ PARTITION_NONE
Definition: vp9shared.h:36
vp9_frame_unref
static void vp9_frame_unref(VP9Frame *f)
Definition: vp9.c:100
VP9Frame::hwaccel_picture_private
void * hwaccel_picture_private
RefStruct reference.
Definition: vp9shared.h:72
refstruct.h
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
vp9_decode_free
static av_cold int vp9_decode_free(AVCodecContext *avctx)
Definition: vp9.c:1233
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:88
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:471
avassert.h
ff_vp9_model_pareto8
const uint8_t ff_vp9_model_pareto8[256][8]
Definition: vp9data.c:1176
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:545
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1905
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:628
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:589
BL_8X8
@ BL_8X8
Definition: vp9shared.h:79
PARTITION_V
@ PARTITION_V
Definition: vp9shared.h:38
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:306
ff_hwaccel_frame_priv_alloc
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
Definition: decode.c:1838
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:127
s
#define s(width, name)
Definition: cbs_vp9.c:198
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:606
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:421
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:220
vp9data.h
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:365
bits
uint8_t bits
Definition: vp3data.h:128
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
decode.h
get_bits.h
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
ff_vp9dsp_init
av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact)
Definition: vp9dsp.c:88
ff_vp9_partition_tree
const int8_t ff_vp9_partition_tree[3][2]
Definition: vp9data.c:35
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
vp9_decode_frame
static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: vp9.c:1550
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:272
frame
static AVFrame * frame
Definition: demux_decode.c:54
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:871
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:110
threadframe.h
NULL
#define NULL
Definition: coverity.c:32
vp9_frame_ref
static int vp9_frame_ref(VP9Frame *dst, VP9Frame *src)
Definition: vp9.c:149
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1039
hwaccel_internal.h
VP9Context
Definition: vp9dec.h:96
REF_FRAME_MVPAIR
#define REF_FRAME_MVPAIR
Definition: vp9shared.h:169
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
vp89_rac_get_uint
static av_unused int vp89_rac_get_uint(VPXRangeCoder *c, int bits)
Definition: vp89_rac.h:41
profiles.h
AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV440P10
Definition: pixfmt.h:470
ff_thread_release_ext_buffer
void ff_thread_release_ext_buffer(ThreadFrame *f)
Unref a ThreadFrame.
Definition: pthread_frame.c:1012
pthread_internal.h
AVFrame::pkt_dts
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:459
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:281
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:469
VP9mv
Definition: vp9shared.h:55
PARTITION_SPLIT
@ PARTITION_SPLIT
Definition: vp9shared.h:39
FF_HW_HAS_CB
#define FF_HW_HAS_CB(avctx, function)
Definition: hwaccel_internal.h:177
atomic_load_explicit
#define atomic_load_explicit(object, order)
Definition: stdatomic.h:96
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
pthread_mutex_unlock
#define pthread_mutex_unlock(a)
Definition: ffprobe.c:79
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:58
vp9.h
VP9Frame::uses_2pass
int uses_2pass
Definition: vp9shared.h:70
f
f
Definition: af_crystalizer.c:121
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:492
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:106
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:361
codec_internal.h
pix_fmt_rgb
static enum AVPixelFormat pix_fmt_rgb[3]
Definition: libdav1d.c:65
REF_INVALID_SCALE
#define REF_INVALID_SCALE
Definition: vp9dec.h:42
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
read_colorspace_details
static int read_colorspace_details(AVCodecContext *avctx)
Definition: vp9.c:441
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:473
size
int size
Definition: twinvq_data.h:10344
vp9_alloc_entries
static int vp9_alloc_entries(AVCodecContext *avctx, int n)
Definition: vp9.c:90
atomic_fetch_add_explicit
#define atomic_fetch_add_explicit(object, operand, order)
Definition: stdatomic.h:149
FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
Definition: codec_internal.h:69
free_buffers
static void free_buffers(VP9Context *s)
Definition: vp9.c:1224
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:475
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1544
AVCodecHWConfigInternal
Definition: hwconfig.h:25
TX_4X4
@ TX_4X4
Definition: vp9.h:28
update_block_buffers
static int update_block_buffers(AVCodecContext *avctx)
Definition: vp9.c:306
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:490
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:114
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:78
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:333
inv_recenter_nonneg
static av_always_inline int inv_recenter_nonneg(int v, int m)
Definition: vp9.c:373
VP9Frame::extradata
void * extradata
RefStruct reference.
Definition: vp9shared.h:67
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:68
vpx_rac_is_end
static av_always_inline int vpx_rac_is_end(VPXRangeCoder *c)
returns 1 if the end of the stream has been reached, 0 otherwise.
Definition: vpx_rac.h:51
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1543
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:187
ff_slice_thread_execute_with_mainfunc
int ff_slice_thread_execute_with_mainfunc(AVCodecContext *avctx, action_func2 *func2, main_func *mainfunc, void *arg, int *ret, int job_count)
Definition: pthread_slice.c:126
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries
Definition: pixfmt.h:607
assign
#define assign(var, type, n)
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:302
FF_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME
#define FF_REFSTRUCT_POOL_FLAG_ZERO_EVERY_TIME
If this flag is set, the entries will be zeroed before being returned to the user (after the init or ...
Definition: refstruct.h:221
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
update_prob
static int update_prob(VPXRangeCoder *c, int p)
Definition: vp9.c:383
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:245
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:484
DEFINE_OFFSET_ARRAY
#define DEFINE_OFFSET_ARRAY(type, name, cnt_variable, mutexes, conds)
Definition: pthread_internal.h:61
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1904
AVCOL_SPC_BT2020_NCL
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:610
vpx_rac.h
decode012
static int BS_FUNC() decode012(BSCTX *bc)
Return decoded truncated unary code for the values 0, 1, 2.
Definition: bitstream_template.h:436
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:485
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:599
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
vpx_rac_get_prob_branchy
static av_always_inline int vpx_rac_get_prob_branchy(VPXRangeCoder *c, int prob)
Definition: vpx_rac.h:99
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
get_sbits_inv
static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
Definition: vp9.c:367
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:602
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:658
ff_thread_get_ext_buffer
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:984
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:656
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:74
avcodec.h
limit
static double limit(double x)
Definition: vf_pseudocolor.c:142
vp89_rac_get_tree
static av_always_inline int vp89_rac_get_tree(VPXRangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp89_rac.h:54
BL_64X64
@ BL_64X64
Definition: vp9shared.h:76
ret
ret
Definition: filter_design.txt:187
vp9_decode_init
static av_cold int vp9_decode_init(AVCodecContext *avctx)
Definition: vp9.c:1796
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:561
hwaccel
static const char * hwaccel
Definition: ffplay.c:356
ff_vpx_init_range_decoder
int ff_vpx_init_range_decoder(VPXRangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vpx_rac.c:42
ff_refstruct_replace
void ff_refstruct_replace(void *dstp, const void *src)
Ensure *dstp refers to the same object as src.
Definition: refstruct.c:160
vp9_tile_data_free
static void vp9_tile_data_free(VP9TileData *td)
Definition: vp9.c:93
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
VP9mvrefPair
Definition: vp9shared.h:60
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:472
pthread_cond_signal
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
Definition: os2threads.h:152
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:411
VP9TileData
Definition: vp9dec.h:167
vp89_rac_get
static av_always_inline int vp89_rac_get(VPXRangeCoder *c)
Definition: vp89_rac.h:36
AVCodecContext
main external API structure.
Definition: avcodec.h:441
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1551
VP9Filter::mask
uint8_t mask[2][2][8][4]
Definition: vp9dec.h:81
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
VP9Frame::mv
VP9mvrefPair * mv
Definition: vp9shared.h:69
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1596
ffhwaccel
static const FFHWAccel * ffhwaccel(const AVHWAccel *codec)
Definition: hwaccel_internal.h:166
ff_vp9_decode_block
void ff_vp9_decode_block(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl, enum BlockPartition bp)
Definition: vp9block.c:1263
NEARESTMV
@ NEARESTMV
Definition: vp9shared.h:43
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
BlockLevel
BlockLevel
Definition: vp9shared.h:75
AVCodecContext::export_side_data
int export_side_data
Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of metadata exported in frame,...
Definition: avcodec.h:2057
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
ff_pthread_init
av_cold int ff_pthread_init(void *obj, const unsigned offsets[])
Initialize/destroy a list of mutexes/conditions contained in a structure.
Definition: pthread.c:104
pthread_cond_wait
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
Definition: os2threads.h:192
vp9dec.h
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
ff_vp9_default_kf_partition_probs
const uint8_t ff_vp9_default_kf_partition_probs[4][4][3]
Definition: vp9data.c:41
AV_VIDEO_ENC_PARAMS_VP9
@ AV_VIDEO_ENC_PARAMS_VP9
VP9 stores:
Definition: video_enc_params.h:44
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
flush
void(* flush)(AVBSFContext *ctx)
Definition: dts2pts_bsf.c:367
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:338
ff_vp9_default_probs
const ProbContext ff_vp9_default_probs
Definition: vp9data.c:1435
CUR_FRAME
#define CUR_FRAME
Definition: vp9shared.h:168
vp9_export_enc_params
static int vp9_export_enc_params(VP9Context *s, VP9Frame *frame)
Definition: vp9.c:1498
AVPacket
This structure stores compressed data.
Definition: packet.h:468
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:468
PARTITION_H
@ PARTITION_H
Definition: vp9shared.h:37
ff_vp9_loopfilter_sb
void ff_vp9_loopfilter_sb(AVCodecContext *avctx, VP9Filter *lflvl, int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff)
Definition: vp9lpf.c:178
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
videodsp.h
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:70
d
d
Definition: ffmpeg_filter.c:372
HWACCEL_MAX
#define HWACCEL_MAX
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Get the block at the specified.
Definition: video_enc_params.h:143
AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV440P12
Definition: pixfmt.h:474
h
h
Definition: vp9dsp_template.c:2038
atomic_init
#define atomic_init(obj, value)
Definition: stdatomic.h:33
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:601
ff_refstruct_unref
void ff_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
ff_vp9_profiles
const AVProfile ff_vp9_profiles[]
Definition: profiles.c:152
ff_refstruct_pool_get
void * ff_refstruct_pool_get(FFRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
video_enc_params.h
set_tile_offset
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
Definition: vp9.c:1216
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2884
ff_vp9_dc_qlookup
const int16_t ff_vp9_dc_qlookup[3][256]
Definition: vp9data.c:231
pthread_mutex_lock
#define pthread_mutex_lock(a)
Definition: ffprobe.c:75
ff_vp9_default_coef_probs
const uint8_t ff_vp9_default_coef_probs[4][2][2][6][6][3]
Definition: vp9data.c:1540