FFmpeg
vp56.c
1 /*
2  * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * VP5 and VP6 compatible video decoder (common features)
24  */
25 
26 #include "libavutil/mem.h"
27 #include "avcodec.h"
28 #include "bytestream.h"
29 #include "decode.h"
30 #include "h264chroma.h"
31 #include "vp56.h"
32 #include "vp56data.h"
33 #include "vpx_rac.h"
34 
35 
36 void ff_vp56_init_dequant(VP56Context *s, int quantizer)
37 {
38  if (s->quantizer != quantizer)
39  ff_vp3dsp_set_bounding_values(s->bounding_values_array, ff_vp56_filter_threshold[quantizer]);
40  s->quantizer = quantizer;
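 /* the dequant tables are 8-bit; the <<2 below rescales them (scale by 4) before they are applied to the block coefficients */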
41  s->dequant_dc = ff_vp56_dc_dequant[quantizer] << 2;
42  s->dequant_ac = ff_vp56_ac_dequant[quantizer] << 2;
43 }
44 
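/* Scan the 12 candidate positions around (row,col) and keep up to two distinct,
 * non-zero vectors from macroblocks that used the same reference frame; the result
 * lands in s->vector_candidate[] and the return value (0..2) is used as a parsing context. */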
45 static int vp56_get_vectors_predictors(VP56Context *s, int row, int col,
46  VP56Frame ref_frame)
47 {
48  int nb_pred = 0;
49  VP56mv vect[2] = {{0,0}, {0,0}};
50  int pos, offset;
51  VP56mv mvp;
52 
53  for (pos=0; pos<12; pos++) {
54  mvp.x = col + ff_vp56_candidate_predictor_pos[pos][0];
55  mvp.y = row + ff_vp56_candidate_predictor_pos[pos][1];
56  if (mvp.x < 0 || mvp.x >= s->mb_width ||
57  mvp.y < 0 || mvp.y >= s->mb_height)
58  continue;
59  offset = mvp.x + s->mb_width*mvp.y;
60 
61  if (ff_vp56_reference_frame[s->macroblocks[offset].type] != ref_frame)
62  continue;
63  if ((s->macroblocks[offset].mv.x == vect[0].x &&
64  s->macroblocks[offset].mv.y == vect[0].y) ||
65  (s->macroblocks[offset].mv.x == 0 &&
66  s->macroblocks[offset].mv.y == 0))
67  continue;
68 
69  vect[nb_pred++] = s->macroblocks[offset].mv;
70  if (nb_pred > 1) {
71  nb_pred = -1;
72  break;
73  }
74  s->vector_candidate_pos = pos;
75  }
76 
77  s->vector_candidate[0] = vect[0];
78  s->vector_candidate[1] = vect[1];
79 
80  return nb_pred+1;
81 }
82 
83 static void vp56_parse_mb_type_models(VP56Context *s)
84 {
85  VPXRangeCoder *c = &s->c;
86  VP56Model *model = s->modelp;
87  int i, ctx, type;
88 
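 /* two optional per-context updates: reload one of the predefined stats tables, then apply signed deltas to individual entries */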
89  for (ctx=0; ctx<3; ctx++) {
90  if (vpx_rac_get_prob_branchy(c, 174)) {
91  int idx = vp56_rac_gets(c, 4);
92  memcpy(model->mb_types_stats[ctx],
93  ff_vp56_pre_def_mb_type_stats[idx][ctx],
94  sizeof(model->mb_types_stats[ctx]));
95  }
96  if (vpx_rac_get_prob_branchy(c, 254)) {
97  for (type=0; type<10; type++) {
98  for(i=0; i<2; i++) {
99  if (vpx_rac_get_prob_branchy(c, 205)) {
100  int delta, sign = vpx_rac_get(c);
101 
102  delta = vp56_rac_get_tree(c, ff_vp56_pmbtm_tree,
103  ff_vp56_mb_type_model_model);
104  if (!delta)
105  delta = 4 * vp56_rac_gets(c, 7);
106  model->mb_types_stats[ctx][type][i] += (delta ^ -sign) + sign;
107  }
108  }
109  }
110  }
111  }
112 
113  /* compute MB type probability tables based on previous MB type */
114  for (ctx=0; ctx<3; ctx++) {
115  int p[10];
116 
117  for (type=0; type<10; type++)
118  p[type] = 100 * model->mb_types_stats[ctx][type][1];
119 
120  for (type=0; type<10; type++) {
121  int p02, p34, p0234, p17, p56, p89, p5689, p156789;
122 
123  /* conservative MB type probability */
124  model->mb_type[ctx][type][0] = 255 - (255 * model->mb_types_stats[ctx][type][0]) / (1 + model->mb_types_stats[ctx][type][0] + model->mb_types_stats[ctx][type][1]);
125 
126  p[type] = 0; /* same MB type => weight is null */
127 
128  /* binary tree parsing probabilities */
129  p02 = p[0] + p[2];
130  p34 = p[3] + p[4];
131  p0234 = p02 + p34;
132  p17 = p[1] + p[7];
133  p56 = p[5] + p[6];
134  p89 = p[8] + p[9];
135  p5689 = p56 + p89;
136  p156789 = p17 + p5689;
137 
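 /* these nine probabilities drive the branches of ff_vp56_pmbt_tree walked in vp56_parse_mb_type() */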
138  model->mb_type[ctx][type][1] = 1 + 255 * p0234/(1+p0234+p156789);
139  model->mb_type[ctx][type][2] = 1 + 255 * p02 / (1+p0234);
140  model->mb_type[ctx][type][3] = 1 + 255 * p17 / (1+p156789);
141  model->mb_type[ctx][type][4] = 1 + 255 * p[0] / (1+p02);
142  model->mb_type[ctx][type][5] = 1 + 255 * p[3] / (1+p34);
143  model->mb_type[ctx][type][6] = 1 + 255 * p[1] / (1+p17);
144  model->mb_type[ctx][type][7] = 1 + 255 * p56 / (1+p5689);
145  model->mb_type[ctx][type][8] = 1 + 255 * p[5] / (1+p56);
146  model->mb_type[ctx][type][9] = 1 + 255 * p[8] / (1+p89);
147 
148  /* restore initial value */
149  p[type] = 100 * model->mb_types_stats[ctx][type][1];
150  }
151  }
152 }
153 
154 static VP56mb vp56_parse_mb_type(VP56Context *s,
155  VP56mb prev_type, int ctx)
156 {
157  uint8_t *mb_type_model = s->modelp->mb_type[ctx][prev_type];
158  VPXRangeCoder *c = &s->c;
159 
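 /* mb_type_model[0] codes the "same type as the previous MB" decision; otherwise the type is read by walking the tree */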
160  if (vpx_rac_get_prob_branchy(c, mb_type_model[0]))
161  return prev_type;
162  else
163  return vp56_rac_get_tree(c, ff_vp56_pmbt_tree, mb_type_model);
164 }
165 
166 static void vp56_decode_4mv(VP56Context *s, int row, int col)
167 {
168  VP56mv mv = {0,0};
169  int type[4];
170  int b;
171 
172  /* parse each block type */
173  for (b=0; b<4; b++) {
174  type[b] = vp56_rac_gets(&s->c, 2);
175  if (type[b])
176  type[b]++; /* only returns 0, 2, 3 or 4 (all INTER_PF) */
177  }
178 
179  /* get vectors */
180  for (b=0; b<4; b++) {
181  switch (type[b]) {
182  case VP56_MB_INTER_NOVEC_PF:
183  s->mv[b] = (VP56mv) {0,0};
184  break;
185  case VP56_MB_INTER_DELTA_PF:
186  s->parse_vector_adjustment(s, &s->mv[b]);
187  break;
188  case VP56_MB_INTER_V1_PF:
189  s->mv[b] = s->vector_candidate[0];
190  break;
191  case VP56_MB_INTER_V2_PF:
192  s->mv[b] = s->vector_candidate[1];
193  break;
194  }
195  mv.x += s->mv[b].x;
196  mv.y += s->mv[b].y;
197  }
198 
199  /* this is the one selected for the whole MB for prediction */
200  s->macroblocks[row * s->mb_width + col].mv = s->mv[3];
201 
202  /* chroma vectors are average luma vectors */
203  s->mv[4].x = s->mv[5].x = RSHIFT(mv.x,2);
204  s->mv[4].y = s->mv[5].y = RSHIFT(mv.y,2);
205 }
206 
207 static VP56mb vp56_decode_mv(VP56Context *s, int row, int col)
208 {
209  VP56mv *mv, vect = {0,0};
210  int ctx, b;
211 
212  ctx = vp56_get_vectors_predictors(s, row, col, VP56_FRAME_PREVIOUS);
213  s->mb_type = vp56_parse_mb_type(s, s->mb_type, ctx);
214  s->macroblocks[row * s->mb_width + col].type = s->mb_type;
215 
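 /* pick the MB vector according to the parsed type: reuse a stored candidate, parse a delta, decode four separate vectors, or fall back to the zero vector */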
216  switch (s->mb_type) {
217  case VP56_MB_INTER_V1_PF:
218  mv = &s->vector_candidate[0];
219  break;
220 
221  case VP56_MB_INTER_V2_PF:
222  mv = &s->vector_candidate[1];
223  break;
224 
225  case VP56_MB_INTER_V1_GF:
226  vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
227  mv = &s->vector_candidate[0];
228  break;
229 
230  case VP56_MB_INTER_V2_GF:
231  vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
232  mv = &s->vector_candidate[1];
233  break;
234 
235  case VP56_MB_INTER_DELTA_PF:
236  s->parse_vector_adjustment(s, &vect);
237  mv = &vect;
238  break;
239 
240  case VP56_MB_INTER_DELTA_GF:
241  vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
242  s->parse_vector_adjustment(s, &vect);
243  mv = &vect;
244  break;
245 
246  case VP56_MB_INTER_4V:
247  vp56_decode_4mv(s, row, col);
248  return s->mb_type;
249 
250  default:
251  mv = &vect;
252  break;
253  }
254 
255  s->macroblocks[row*s->mb_width + col].mv = *mv;
256 
257  /* same vector for all blocks */
258  for (b=0; b<6; b++)
259  s->mv[b] = *mv;
260 
261  return s->mb_type;
262 }
263 
264 static VP56mb vp56_conceal_mv(VP56Context *s, int row, int col)
265 {
266  VP56mv *mv, vect = {0,0};
267  int b;
268 
269  s->mb_type = VP56_MB_INTER_NOVEC_PF;
270  s->macroblocks[row * s->mb_width + col].type = s->mb_type;
271 
272  mv = &vect;
273 
274  s->macroblocks[row*s->mb_width + col].mv = *mv;
275 
276  /* same vector for all blocks */
277  for (b=0; b<6; b++)
278  s->mv[b] = *mv;
279 
280  return s->mb_type;
281 }
282 
283 static void vp56_add_predictors_dc(VP56Context *s, VP56Frame ref_frame)
284 {
285  int idx = s->idct_scantable[0];
286  int b;
287 
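 /* predict each block's DC coefficient from left/above neighbours that used the same reference frame, then dequantize it */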
288  for (b=0; b<6; b++) {
289  VP56RefDc *ab = &s->above_blocks[s->above_block_idx[b]];
290  VP56RefDc *lb = &s->left_block[ff_vp56_b6to4[b]];
291  int count = 0;
292  int dc = 0;
293  int i;
294 
295  if (ref_frame == lb->ref_frame) {
296  dc += lb->dc_coeff;
297  count++;
298  }
299  if (ref_frame == ab->ref_frame) {
300  dc += ab->dc_coeff;
301  count++;
302  }
303  if (s->avctx->codec->id == AV_CODEC_ID_VP5)
304  for (i=0; i<2; i++)
305  if (count < 2 && ref_frame == ab[-1+2*i].ref_frame) {
306  dc += ab[-1+2*i].dc_coeff;
307  count++;
308  }
309  if (count == 0)
310  dc = s->prev_dc[ff_vp56_b2p[b]][ref_frame];
311  else if (count == 2)
312  dc /= 2;
313 
314  s->block_coeff[b][idx] += dc;
315  s->prev_dc[ff_vp56_b2p[b]][ref_frame] = s->block_coeff[b][idx];
316  ab->dc_coeff = s->block_coeff[b][idx];
317  ab->ref_frame = ref_frame;
318  lb->dc_coeff = s->block_coeff[b][idx];
319  lb->ref_frame = ref_frame;
320  s->block_coeff[b][idx] *= s->dequant_dc;
321  }
322 }
323 
324 static void vp56_deblock_filter(VP56Context *s, uint8_t *yuv,
325  ptrdiff_t stride, int dx, int dy)
326 {
327  if (s->avctx->codec->id == AV_CODEC_ID_VP5) {
328  int t = ff_vp56_filter_threshold[s->quantizer];
329  if (dx) s->vp56dsp.edge_filter_hor(yuv + 10-dx , stride, t);
330  if (dy) s->vp56dsp.edge_filter_ver(yuv + stride*(10-dy), stride, t);
331  } else {
332  int * bounding_values = s->bounding_values_array + 127;
333  if (dx)
334  ff_vp3dsp_h_loop_filter_12(yuv + 10-dx, stride, bounding_values);
335  if (dy)
336  ff_vp3dsp_v_loop_filter_12(yuv + stride*(10-dy), stride, bounding_values);
337  }
338 }
339 
340 static void vp56_mc(VP56Context *s, int b, int plane, uint8_t *src,
341  ptrdiff_t stride, int x, int y, ptrdiff_t ref_stride)
342 {
343  uint8_t *dst = s->frames[VP56_FRAME_CURRENT]->data[plane] + s->block_offset[b];
344  uint8_t *src_block;
345  int src_offset;
346  int overlap_offset = 0;
347  int mask = s->vp56_coord_div[b] - 1;
348  int deblock_filtering = s->deblock_filtering;
349  int dx;
350  int dy;
351 
352  if (s->avctx->skip_loop_filter >= AVDISCARD_ALL ||
353  (s->avctx->skip_loop_filter >= AVDISCARD_NONKEY
354  && !(s->frames[VP56_FRAME_CURRENT]->flags & AV_FRAME_FLAG_KEY)))
355  deblock_filtering = 0;
356 
357  dx = s->mv[b].x / s->vp56_coord_div[b];
358  dy = s->mv[b].y / s->vp56_coord_div[b];
359 
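 /* dx/dy is the full-pel part of the vector; the sub-pel remainder (mv & mask) is handled below by the codec-specific filter or a no-rounding average of two positions */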
360  if (b >= 4) {
361  x /= 2;
362  y /= 2;
363  }
364  x += dx - 2;
365  y += dy - 2;
366 
367  if (s->interlaced && s->il_block) {
368  /* extract 12*(4+16+4) block from frame (containing both fields), then treat src_block as specific field */
369  s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
370  src + s->block_offset[b] + (dy-4)*ref_stride + (dx-2),
371  ref_stride, ref_stride,
372  12, 24, x, y - 2,
373  s->plane_width[plane],
374  s->plane_height[plane]);
375  src_block = s->edge_emu_buffer;
376  src_offset = 2 + 4*ref_stride;
377  } else if (x<0 || x+12>=s->plane_width[plane] ||
378  y<0 || y+12>=s->plane_height[plane]) {
379  s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
380  src + s->block_offset[b] + (dy-2)*stride + (dx-2),
381  stride, stride,
382  12, 12, x, y,
383  s->plane_width[plane],
384  s->plane_height[plane]);
385  src_block = s->edge_emu_buffer;
386  src_offset = 2 + 2*stride;
387  } else if (deblock_filtering) {
388  /* only need a 12x12 block, but there is no such dsp function, */
389  /* so copy a 16x12 block */
390  s->hdsp.put_pixels_tab[0][0](s->edge_emu_buffer,
391  src + s->block_offset[b] + (dy-2)*stride + (dx-2),
392  stride, 12);
393  src_block = s->edge_emu_buffer;
394  src_offset = 2 + 2*stride;
395  } else {
396  src_block = src;
397  src_offset = s->block_offset[b] + dy*stride + dx;
398  }
399 
400  if (deblock_filtering)
401  vp56_deblock_filter(s, src_block, stride, dx&7, dy&7);
402 
403  if (s->mv[b].x & mask)
404  overlap_offset += (s->mv[b].x > 0) ? 1 : -1;
405  if (s->mv[b].y & mask)
406  overlap_offset += (s->mv[b].y > 0) ? stride : -stride;
407 
408  if (overlap_offset) {
409  if (s->filter)
410  s->filter(s, dst, src_block, src_offset, src_offset+overlap_offset,
411  stride, s->mv[b], mask, s->filter_selection, b<4);
412  else
413  s->vp3dsp.put_no_rnd_pixels_l2(dst, src_block+src_offset,
414  src_block+src_offset+overlap_offset,
415  stride, 8);
416  } else {
417  s->hdsp.put_pixels_tab[1][0](dst, src_block+src_offset, stride, 8);
418  }
419 }
420 
421 static void vp56_idct_put(VP56Context *s, uint8_t * dest, ptrdiff_t stride, int16_t *block, int selector)
422 {
423  if (selector > 10 || selector == 1)
424  s->vp3dsp.idct_put(dest, stride, block);
425  else
426  ff_vp3dsp_idct10_put(dest, stride, block);
427 }
428 
429 static void vp56_idct_add(VP56Context *s, uint8_t * dest, ptrdiff_t stride, int16_t *block, int selector)
430 {
431  if (selector > 10)
432  s->vp3dsp.idct_add(dest, stride, block);
433  else if (selector > 1)
434  ff_vp3dsp_idct10_add(dest, stride, block);
435  else
436  s->vp3dsp.idct_dc_add(dest, stride, block);
437 }
438 
439 static av_always_inline void vp56_render_mb(VP56Context *s, int row, int col, int is_alpha, VP56mb mb_type)
440 {
441  int b, ab, b_max, plane, off;
442  AVFrame *frame_current, *frame_ref;
443  VP56Frame ref_frame;
444  ptrdiff_t ref_stride[4];
445 
446  ref_frame = ff_vp56_reference_frame[mb_type];
447 
448  frame_current = s->frames[VP56_FRAME_CURRENT];
449  frame_ref = s->frames[ref_frame];
450  if (mb_type != VP56_MB_INTRA && !frame_ref->data[0])
451  return;
452 
453  memcpy(ref_stride, s->stride, sizeof(s->stride));
454  if (s->interlaced && s->il_block) {
455  s->block_offset[2] -= s->stride[0] * 7;
456  s->block_offset[3] -= s->stride[0] * 7;
457  s->stride[0] *= 2;
458  }
459 
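 /* alpha macroblocks only carry the 4 luma-sized blocks; regular ones add the 2 chroma blocks */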
460  ab = 6*is_alpha;
461  b_max = 6 - 2*is_alpha;
462 
463  switch (mb_type) {
464  case VP56_MB_INTRA:
465  for (b=0; b<b_max; b++) {
466  plane = ff_vp56_b2p[b+ab];
467  vp56_idct_put(s, frame_current->data[plane] + s->block_offset[b],
468  s->stride[plane], s->block_coeff[b], s->idct_selector[b]);
469  }
470  break;
471 
472  case VP56_MB_INTER_NOVEC_PF:
473  case VP56_MB_INTER_NOVEC_GF:
474  for (b=0; b<b_max; b++) {
475  plane = ff_vp56_b2p[b+ab];
476  off = s->block_offset[b];
477  s->hdsp.put_pixels_tab[1][0](frame_current->data[plane] + off,
478  frame_ref->data[plane] + off,
479  s->stride[plane], 8);
480  vp56_idct_add(s, frame_current->data[plane] + off,
481  s->stride[plane], s->block_coeff[b], s->idct_selector[b]);
482  }
483  break;
484 
485  case VP56_MB_INTER_DELTA_PF:
486  case VP56_MB_INTER_V1_PF:
487  case VP56_MB_INTER_V2_PF:
488  case VP56_MB_INTER_DELTA_GF:
489  case VP56_MB_INTER_4V:
490  case VP56_MB_INTER_V1_GF:
491  case VP56_MB_INTER_V2_GF:
492  for (b=0; b<b_max; b++) {
493  int x_off = b==1 || b==3 ? 8 : 0;
494  int y_off = b==2 || b==3 ? (s->interlaced && s->il_block ? 1 : 8) : 0;
495  plane = ff_vp56_b2p[b+ab];
496  vp56_mc(s, b, plane, frame_ref->data[plane], s->stride[plane],
497  16*col+x_off, 16*row+y_off, ref_stride[plane]);
498  vp56_idct_add(s, frame_current->data[plane] + s->block_offset[b],
499  s->stride[plane], s->block_coeff[b], s->idct_selector[b]);
500  }
501  break;
502  }
503 
504  if (is_alpha) {
505  s->block_coeff[4][0] = 0;
506  s->block_coeff[5][0] = 0;
507  }
508 
509  if (s->interlaced && s->il_block) {
510  s->stride[0] /= 2;
511  s->block_offset[2] += s->stride[0] * 7;
512  s->block_offset[3] += s->stride[0] * 7;
513  }
514 }
515 
516 static int vp56_decode_mb(VP56Context *s, int row, int col, int is_alpha)
517 {
518  VP56mb mb_type;
519  int ret;
520 
521  if (s->interlaced) {
522  int prob = s->il_prob;
523 
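 /* bias the interlacing probability towards the choice made by the previous macroblock in the row */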
524  if (col > 0) {
525  if (s->il_block)
526  prob -= prob >> 1;
527  else
528  prob += (256 - prob) >> 1; /* can be simplified/combined */
529  }
530 
531  s->il_block = vpx_rac_get_prob(&s->c, prob);
532  }
533 
534  if (s->frames[VP56_FRAME_CURRENT]->flags & AV_FRAME_FLAG_KEY)
535  mb_type = VP56_MB_INTRA;
536  else
537  mb_type = vp56_decode_mv(s, row, col);
538 
539  ret = s->parse_coeff(s);
540  if (ret < 0)
541  return ret;
542 
543  vp56_render_mb(s, row, col, is_alpha, mb_type);
544 
545  return 0;
546 }
547 
548 static int vp56_conceal_mb(VP56Context *s, int row, int col, int is_alpha)
549 {
550  VP56mb mb_type;
551 
552  if (s->frames[VP56_FRAME_CURRENT]->flags & AV_FRAME_FLAG_KEY)
553  mb_type = VP56_MB_INTRA;
554  else
555  mb_type = vp56_conceal_mv(s, row, col);
556 
557  vp56_render_mb(s, row, col, is_alpha, mb_type);
558 
559  return 0;
560 }
561 
562 static int vp56_size_changed(VP56Context *s)
563 {
564  AVCodecContext *avctx = s->avctx;
565  int stride = s->frames[VP56_FRAME_CURRENT]->linesize[0];
566  int i;
567 
568  s->plane_width[0] = s->plane_width[3] = avctx->coded_width;
569  s->plane_width[1] = s->plane_width[2] = avctx->coded_width/2;
570  s->plane_height[0] = s->plane_height[3] = avctx->coded_height;
571  s->plane_height[1] = s->plane_height[2] = avctx->coded_height/2;
572 
573  s->have_undamaged_frame = 0;
574 
575  for (i=0; i<4; i++)
576  s->stride[i] = s->flip * s->frames[VP56_FRAME_CURRENT]->linesize[i];
577 
578  s->mb_width = (avctx->coded_width +15) / 16;
579  s->mb_height = (avctx->coded_height+15) / 16;
580 
581  if (s->mb_width > 1000 || s->mb_height > 1000) {
582  ff_set_dimensions(avctx, 0, 0);
583  av_log(avctx, AV_LOG_ERROR, "picture too big\n");
584  return AVERROR_INVALIDDATA;
585  }
586 
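 /* (re)allocate the above-row DC prediction array, the macroblock array and the edge emulation buffer */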
587  av_reallocp_array(&s->above_blocks, 4*s->mb_width+6,
588  sizeof(*s->above_blocks));
589  av_reallocp_array(&s->macroblocks, s->mb_width*s->mb_height,
590  sizeof(*s->macroblocks));
591  av_free(s->edge_emu_buffer_alloc);
592  s->edge_emu_buffer_alloc = av_malloc(16*stride*2);
593  s->edge_emu_buffer = s->edge_emu_buffer_alloc;
594  if (!s->above_blocks || !s->macroblocks || !s->edge_emu_buffer_alloc)
595  return AVERROR(ENOMEM);
596  if (s->flip < 0)
597  s->edge_emu_buffer += 15 * stride * 2;
598 
599  if (s->alpha_context)
600  return vp56_size_changed(s->alpha_context);
601 
602  return 0;
603 }
604 
605 static int ff_vp56_decode_mbs(AVCodecContext *avctx, void *, int, int);
606 
607 int ff_vp56_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
608  int *got_frame, AVPacket *avpkt)
609 {
609 {
610  const uint8_t *buf = avpkt->data;
611  VP56Context *s = avctx->priv_data;
612  AVFrame *const p = s->frames[VP56_FRAME_CURRENT];
613  int remaining_buf_size = avpkt->size;
614  int alpha_offset = remaining_buf_size;
615  int i, res;
616  int ret;
617 
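 /* for streams with alpha, the first 3 bytes give the offset where the alpha plane's coded data starts */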
618  if (s->has_alpha) {
619  if (remaining_buf_size < 3)
620  return AVERROR_INVALIDDATA;
621  alpha_offset = bytestream_get_be24(&buf);
622  remaining_buf_size -= 3;
623  if (remaining_buf_size < alpha_offset)
624  return AVERROR_INVALIDDATA;
625  }
626 
627  res = s->parse_header(s, buf, alpha_offset);
628  if (res < 0)
629  return res;
630 
631  if (res == VP56_SIZE_CHANGE) {
632  for (i = 0; i < 4; i++) {
633  av_frame_unref(s->frames[i]);
634  if (s->alpha_context)
635  av_frame_unref(s->alpha_context->frames[i]);
636  }
637  s->frames[VP56_FRAME_CURRENT]->flags |= AV_FRAME_FLAG_KEY; //FIXME
638  }
639 
640  ret = ff_get_buffer(avctx, p, AV_GET_BUFFER_FLAG_REF);
641  if (ret < 0) {
642  if (res == VP56_SIZE_CHANGE)
643  ff_set_dimensions(avctx, 0, 0);
644  return ret;
645  }
646 
647  if (avctx->pix_fmt == AV_PIX_FMT_YUVA420P) {
648  if ((ret = av_frame_replace(s->alpha_context->frames[VP56_FRAME_CURRENT], p)) < 0) {
649  av_frame_unref(p);
650  if (res == VP56_SIZE_CHANGE)
651  ff_set_dimensions(avctx, 0, 0);
652  return ret;
653  }
654  }
655 
656  if (res == VP56_SIZE_CHANGE) {
657  if (vp56_size_changed(s)) {
658  av_frame_unref(p);
659  return AVERROR_INVALIDDATA;
660  }
661  }
662 
663  if (avctx->pix_fmt == AV_PIX_FMT_YUVA420P) {
664  int bak_w = avctx->width;
665  int bak_h = avctx->height;
666  int bak_cw = avctx->coded_width;
667  int bak_ch = avctx->coded_height;
668  buf += alpha_offset;
669  remaining_buf_size -= alpha_offset;
670 
671  res = s->alpha_context->parse_header(s->alpha_context, buf, remaining_buf_size);
672  if (res != 0) {
673  if(res==VP56_SIZE_CHANGE) {
674  av_log(avctx, AV_LOG_ERROR, "Alpha reconfiguration\n");
675  avctx->width = bak_w;
676  avctx->height = bak_h;
677  avctx->coded_width = bak_cw;
678  avctx->coded_height = bak_ch;
679  }
680  av_frame_unref(p);
681  return AVERROR_INVALIDDATA;
682  }
683  }
684 
685  s->discard_frame = 0;
686  avctx->execute2(avctx, ff_vp56_decode_mbs, 0, 0, (avctx->pix_fmt == AV_PIX_FMT_YUVA420P) + 1);
687 
688  if (s->discard_frame)
689  return AVERROR_INVALIDDATA;
690 
691  if ((res = av_frame_ref(rframe, p)) < 0)
692  return res;
693  *got_frame = 1;
694 
695  return avpkt->size;
696 }
697 
698 static int ff_vp56_decode_mbs(AVCodecContext *avctx, void *data,
699  int jobnr, int threadnr)
700 {
701  VP56Context *s0 = avctx->priv_data;
702  int is_alpha = (jobnr == 1);
703  VP56Context *s = is_alpha ? s0->alpha_context : s0;
704  AVFrame *const p = s->frames[VP56_FRAME_CURRENT];
705  int mb_row, mb_col, mb_row_flip, mb_offset = 0;
706  int block, y, uv;
707  ptrdiff_t stride_y, stride_uv;
708  int res;
709  int damaged = 0;
710 
711  if (p->flags & AV_FRAME_FLAG_KEY) {
712  p->pict_type = AV_PICTURE_TYPE_I;
713  s->default_models_init(s);
714  for (block=0; block<s->mb_height*s->mb_width; block++)
715  s->macroblocks[block].type = VP56_MB_INTRA;
716  } else {
717  p->pict_type = AV_PICTURE_TYPE_P;
718  vp56_parse_mb_type_models(s);
719  s->parse_vector_models(s);
720  s->mb_type = VP56_MB_INTER_NOVEC_PF;
721  }
722 
723  if (s->parse_coeff_models(s))
724  goto next;
725 
726  if (s->interlaced) {
727  s->frames[VP56_FRAME_CURRENT]->flags |= AV_FRAME_FLAG_INTERLACED;
728  s->il_prob = vp56_rac_gets(&s->c, 8);
729  }
730 
731  memset(s->prev_dc, 0, sizeof(s->prev_dc));
732  s->prev_dc[1][VP56_FRAME_CURRENT] = 128;
733  s->prev_dc[2][VP56_FRAME_CURRENT] = 128;
734 
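 /* reset the above-row DC predictors; the two entries forced to VP56_FRAME_CURRENT appear to act as sentinels for the chroma rows */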
735  for (block=0; block < 4*s->mb_width+6; block++) {
736  s->above_blocks[block].ref_frame = VP56_FRAME_NONE;
737  s->above_blocks[block].dc_coeff = 0;
738  s->above_blocks[block].not_null_dc = 0;
739  }
740  s->above_blocks[2*s->mb_width + 2].ref_frame = VP56_FRAME_CURRENT;
741  s->above_blocks[3*s->mb_width + 4].ref_frame = VP56_FRAME_CURRENT;
742 
743  stride_y = p->linesize[0];
744  stride_uv = p->linesize[1];
745 
746  if (s->flip < 0)
747  mb_offset = 7;
748 
749  /* main macroblocks loop */
750  for (mb_row=0; mb_row<s->mb_height; mb_row++) {
751  if (s->flip < 0)
752  mb_row_flip = s->mb_height - mb_row - 1;
753  else
754  mb_row_flip = mb_row;
755 
756  for (block=0; block<4; block++) {
757  s->left_block[block].ref_frame = VP56_FRAME_NONE;
758  s->left_block[block].dc_coeff = 0;
759  s->left_block[block].not_null_dc = 0;
760  }
761  memset(s->coeff_ctx, 0, sizeof(s->coeff_ctx));
762  memset(s->coeff_ctx_last, 24, sizeof(s->coeff_ctx_last));
763 
764  s->above_block_idx[0] = 1;
765  s->above_block_idx[1] = 2;
766  s->above_block_idx[2] = 1;
767  s->above_block_idx[3] = 2;
768  s->above_block_idx[4] = 2*s->mb_width + 2 + 1;
769  s->above_block_idx[5] = 3*s->mb_width + 4 + 1;
770 
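 /* block_offset[0..3] are the four 8x8 luma blocks (frbi/srbi swap the two luma rows when the image is flipped), [4] and [5] the chroma blocks */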
771  s->block_offset[s->frbi] = (mb_row_flip*16 + mb_offset) * stride_y;
772  s->block_offset[s->srbi] = s->block_offset[s->frbi] + 8*stride_y;
773  s->block_offset[1] = s->block_offset[0] + 8;
774  s->block_offset[3] = s->block_offset[2] + 8;
775  s->block_offset[4] = (mb_row_flip*8 + mb_offset) * stride_uv;
776  s->block_offset[5] = s->block_offset[4];
777 
778  for (mb_col=0; mb_col<s->mb_width; mb_col++) {
779  if (!damaged) {
780  int ret = vp56_decode_mb(s, mb_row, mb_col, is_alpha);
781  if (ret < 0) {
782  damaged = 1;
783  if (!s->have_undamaged_frame || !avctx->error_concealment) {
784  s->discard_frame = 1;
785  return AVERROR_INVALIDDATA;
786  }
787  }
788  }
789  if (damaged)
790  vp56_conceal_mb(s, mb_row, mb_col, is_alpha);
791 
792  for (y=0; y<4; y++) {
793  s->above_block_idx[y] += 2;
794  s->block_offset[y] += 16;
795  }
796 
797  for (uv=4; uv<6; uv++) {
798  s->above_block_idx[uv] += 1;
799  s->block_offset[uv] += 8;
800  }
801  }
802  }
803 
804  if (!damaged)
805  s->have_undamaged_frame = 1;
806 
807 next:
808  if ((p->flags & AV_FRAME_FLAG_KEY) || s->golden_frame) {
809  if ((res = av_frame_replace(s->frames[VP56_FRAME_GOLDEN], p)) < 0)
810  return res;
811  }
812 
813  av_frame_unref(s->frames[VP56_FRAME_PREVIOUS]);
814  FFSWAP(AVFrame *, s->frames[VP56_FRAME_CURRENT],
815  s->frames[VP56_FRAME_PREVIOUS]);
816  return 0;
817 }
818 
819 av_cold int ff_vp56_init_context(AVCodecContext *avctx, VP56Context *s,
820  int flip, int has_alpha)
821 {
822  int i;
823 
824  s->avctx = avctx;
825  avctx->pix_fmt = has_alpha ? AV_PIX_FMT_YUVA420P : AV_PIX_FMT_YUV420P;
826  if (avctx->skip_alpha) avctx->pix_fmt = AV_PIX_FMT_YUV420P;
827 
828  ff_h264chroma_init(&s->h264chroma, 8);
829  ff_hpeldsp_init(&s->hdsp, avctx->flags);
830  ff_videodsp_init(&s->vdsp, 8);
831  ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
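 /* the scan table is the transposed zigzag, apparently to match the coefficient layout expected by the shared VP3 IDCT */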
832  for (i = 0; i < 64; i++) {
833 #define TRANSPOSE(x) (((x) >> 3) | (((x) & 7) << 3))
834  s->idct_scantable[i] = TRANSPOSE(ff_zigzag_direct[i]);
835 #undef TRANSPOSE
836  }
837 
838  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
839  s->frames[i] = av_frame_alloc();
840  if (!s->frames[i])
841  return AVERROR(ENOMEM);
842  }
843  s->edge_emu_buffer_alloc = NULL;
844 
845  s->above_blocks = NULL;
846  s->macroblocks = NULL;
847  s->quantizer = -1;
848  s->deblock_filtering = 1;
849  s->golden_frame = 0;
850 
851  s->filter = NULL;
852 
853  s->has_alpha = has_alpha;
854 
855  s->modelp = &s->model;
856 
857  if (flip) {
858  s->flip = -1;
859  s->frbi = 2;
860  s->srbi = 0;
861  } else {
862  s->flip = 1;
863  s->frbi = 0;
864  s->srbi = 2;
865  }
866 
867  return 0;
868 }
869 
870 av_cold int ff_vp56_free_context(VP56Context *s)
871 {
872  int i;
873 
874  av_freep(&s->above_blocks);
875  av_freep(&s->macroblocks);
876  av_freep(&s->edge_emu_buffer_alloc);
877 
878  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
879  av_frame_free(&s->frames[i]);
880 
881  return 0;
882 }