FFmpeg
cavs.c
1 /*
2  * Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
3  * Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Chinese AVS video (AVS1-P2, JiZhun profile) decoder
25  * @author Stefan Gehrer <stefan.gehrer@gmx.de>
26  */
27 
28 #include "avcodec.h"
29 #include "get_bits.h"
30 #include "golomb.h"
31 #include "h264chroma.h"
32 #include "idctdsp.h"
33 #include "internal.h"
34 #include "mathops.h"
35 #include "qpeldsp.h"
36 #include "cavs.h"
37 
38 static const uint8_t alpha_tab[64] = {
39  0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3,
40  4, 4, 5, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 18, 20,
41  22, 24, 26, 28, 30, 33, 33, 35, 35, 36, 37, 37, 39, 39, 42, 44,
42  46, 48, 50, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
43 };
44 
45 static const uint8_t beta_tab[64] = {
46  0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
47  2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6,
48  6, 7, 7, 7, 8, 8, 8, 9, 9, 10, 10, 11, 11, 12, 13, 14,
49  15, 16, 17, 18, 19, 20, 21, 22, 23, 23, 24, 24, 25, 25, 26, 27
50 };
51 
52 static const uint8_t tc_tab[64] = {
53  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
54  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
55  2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4,
56  5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9
57 };
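/* Deblocking threshold tables: alpha_tab/beta_tab/tc_tab are indexed by the
 * offset-adjusted average QP of the two neighbouring blocks, clipped to 0..63
 * (see SET_PARAMS below). In the cavsdsp filter routines (not shown here)
 * alpha and beta act as edge-activity thresholds and tc as the clipping limit
 * for the correction applied to the edge samples. */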
58 
59 /** mark block as unavailable, i.e. out of picture
60  * or not yet decoded */
61 static const cavs_vector un_mv = { 0, 0, 1, NOT_AVAIL };
62 
63 static const int8_t left_modifier_l[8] = { 0, -1, 6, -1, -1, 7, 6, 7 };
64 static const int8_t top_modifier_l[8] = { -1, 1, 5, -1, -1, 5, 7, 7 };
65 static const int8_t left_modifier_c[7] = { 5, -1, 2, -1, 6, 5, 6 };
66 static const int8_t top_modifier_c[7] = { 4, 1, -1, -1, 4, 6, 6 };
67 
68 /*****************************************************************************
69  *
70  * in-loop deblocking filter
71  *
72  ****************************************************************************/
73 
74 static inline int get_bs(cavs_vector *mvP, cavs_vector *mvQ, int b)
75 {
76  if ((mvP->ref == REF_INTRA) || (mvQ->ref == REF_INTRA))
77  return 2;
78  if((abs(mvP->x - mvQ->x) >= 4) ||
79  (abs(mvP->y - mvQ->y) >= 4) ||
80  (mvP->ref != mvQ->ref))
81  return 1;
82  if (b) {
83  mvP += MV_BWD_OFFS;
84  mvQ += MV_BWD_OFFS;
85  if((abs(mvP->x - mvQ->x) >= 4) ||
86  (abs(mvP->y - mvQ->y) >= 4) ||
87  (mvP->ref != mvQ->ref))
88  return 1;
89  }
90  return 0;
91 }
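/* Boundary strength as computed above: 2 if either side is intra coded, 1 if
 * the forward vectors differ by a full luma sample or more (>= 4 quarter-pel
 * units) or point to different references (the backward vectors are checked
 * the same way when b is set), 0 otherwise, i.e. no filtering. */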
92 
93 #define SET_PARAMS \
94  alpha = alpha_tab[av_clip_uintp2(qp_avg + h->alpha_offset, 6)]; \
95  beta = beta_tab[av_clip_uintp2(qp_avg + h->beta_offset, 6)]; \
96  tc = tc_tab[av_clip_uintp2(qp_avg + h->alpha_offset, 6)];
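/* av_clip_uintp2(x, 6) clamps the table index to 0..63, so an out-of-range
 * qp_avg plus offset can never read past the 64-entry tables above. */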
97 
98 /**
99  * in-loop deblocking filter for a single macroblock
100  *
101  * boundary strength (bs) mapping:
102  *
103  * --4---5--
104  * 0   2   |
105  * | 6 | 7 |
106  * 1   3   |
107  * ---------
108  */
109 void ff_cavs_filter(AVSContext *h, enum cavs_mb mb_type)
110 {
111  uint8_t bs[8];
112  int qp_avg, alpha, beta, tc;
113  int i;
114 
115  /* save un-deblocked lines */
116  h->topleft_border_y = h->top_border_y[h->mbx * 16 + 15];
117  h->topleft_border_u = h->top_border_u[h->mbx * 10 + 8];
118  h->topleft_border_v = h->top_border_v[h->mbx * 10 + 8];
119  memcpy(&h->top_border_y[h->mbx * 16], h->cy + 15 * h->l_stride, 16);
120  memcpy(&h->top_border_u[h->mbx * 10 + 1], h->cu + 7 * h->c_stride, 8);
121  memcpy(&h->top_border_v[h->mbx * 10 + 1], h->cv + 7 * h->c_stride, 8);
122  for (i = 0; i < 8; i++) {
123  h->left_border_y[i * 2 + 1] = *(h->cy + 15 + (i * 2 + 0) * h->l_stride);
124  h->left_border_y[i * 2 + 2] = *(h->cy + 15 + (i * 2 + 1) * h->l_stride);
125  h->left_border_u[i + 1] = *(h->cu + 7 + i * h->c_stride);
126  h->left_border_v[i + 1] = *(h->cv + 7 + i * h->c_stride);
127  }
128  if (!h->loop_filter_disable) {
129  /* determine bs */
130  if (mb_type == I_8X8)
131  memset(bs, 2, 8);
132  else {
133  memset(bs, 0, 8);
134  if (ff_cavs_partition_flags[mb_type] & SPLITV) {
135  bs[2] = get_bs(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X1], mb_type > P_8X8);
136  bs[3] = get_bs(&h->mv[MV_FWD_X2], &h->mv[MV_FWD_X3], mb_type > P_8X8);
137  }
138  if (ff_cavs_partition_flags[mb_type] & SPLITH) {
139  bs[6] = get_bs(&h->mv[MV_FWD_X0], &h->mv[MV_FWD_X2], mb_type > P_8X8);
140  bs[7] = get_bs(&h->mv[MV_FWD_X1], &h->mv[MV_FWD_X3], mb_type > P_8X8);
141  }
142  bs[0] = get_bs(&h->mv[MV_FWD_A1], &h->mv[MV_FWD_X0], mb_type > P_8X8);
143  bs[1] = get_bs(&h->mv[MV_FWD_A3], &h->mv[MV_FWD_X2], mb_type > P_8X8);
144  bs[4] = get_bs(&h->mv[MV_FWD_B2], &h->mv[MV_FWD_X0], mb_type > P_8X8);
145  bs[5] = get_bs(&h->mv[MV_FWD_B3], &h->mv[MV_FWD_X1], mb_type > P_8X8);
146  }
147  if (AV_RN64(bs)) {
148  if (h->flags & A_AVAIL) {
149  qp_avg = (h->qp + h->left_qp + 1) >> 1;
150  SET_PARAMS;
151  h->cdsp.cavs_filter_lv(h->cy, h->l_stride, alpha, beta, tc, bs[0], bs[1]);
152  qp_avg = (ff_cavs_chroma_qp[h->qp] + ff_cavs_chroma_qp[h->left_qp] + 1) >> 1;
153  SET_PARAMS;
154  h->cdsp.cavs_filter_cv(h->cu, h->c_stride, alpha, beta, tc, bs[0], bs[1]);
155  h->cdsp.cavs_filter_cv(h->cv, h->c_stride, alpha, beta, tc, bs[0], bs[1]);
156  }
157  qp_avg = h->qp;
158  SET_PARAMS;
159  h->cdsp.cavs_filter_lv(h->cy + 8, h->l_stride, alpha, beta, tc, bs[2], bs[3]);
160  h->cdsp.cavs_filter_lh(h->cy + 8 * h->l_stride, h->l_stride, alpha, beta, tc, bs[6], bs[7]);
161 
162  if (h->flags & B_AVAIL) {
163  qp_avg = (h->qp + h->top_qp[h->mbx] + 1) >> 1;
164  SET_PARAMS;
165  h->cdsp.cavs_filter_lh(h->cy, h->l_stride, alpha, beta, tc, bs[4], bs[5]);
166  qp_avg = (ff_cavs_chroma_qp[h->qp] + ff_cavs_chroma_qp[h->top_qp[h->mbx]] + 1) >> 1;
167  SET_PARAMS;
168  h->cdsp.cavs_filter_ch(h->cu, h->c_stride, alpha, beta, tc, bs[4], bs[5]);
169  h->cdsp.cavs_filter_ch(h->cv, h->c_stride, alpha, beta, tc, bs[4], bs[5]);
170  }
171  }
172  }
173  h->left_qp = h->qp;
174  h->top_qp[h->mbx] = h->qp;
175 }
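/* Filtering order above: the left macroblock edge (bs[0]/bs[1]) and the top
 * edge (bs[4]/bs[5]) use a QP averaged with the left/top neighbour, the two
 * internal edges (bs[2]/bs[3] vertical, bs[6]/bs[7] horizontal) use this
 * macroblock's own QP, and the chroma macroblock edges use QPs mapped through
 * ff_cavs_chroma_qp[]. */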
176 
177 #undef SET_PARAMS
178 
179 /*****************************************************************************
180  *
181  * spatial intra prediction
182  *
183  ****************************************************************************/
184 
185 void ff_cavs_load_intra_pred_luma(AVSContext *h, uint8_t *top,
186  uint8_t **left, int block)
187 {
188  int i;
189 
190  switch (block) {
191  case 0:
192  *left = h->left_border_y;
193  h->left_border_y[0] = h->left_border_y[1];
194  memset(&h->left_border_y[17], h->left_border_y[16], 9);
195  memcpy(&top[1], &h->top_border_y[h->mbx * 16], 16);
196  top[17] = top[16];
197  top[0] = top[1];
198  if ((h->flags & A_AVAIL) && (h->flags & B_AVAIL))
199  h->left_border_y[0] = top[0] = h->topleft_border_y;
200  break;
201  case 1:
202  *left = h->intern_border_y;
203  for (i = 0; i < 8; i++)
204  h->intern_border_y[i + 1] = *(h->cy + 7 + i * h->l_stride);
205  memset(&h->intern_border_y[9], h->intern_border_y[8], 9);
206  h->intern_border_y[0] = h->intern_border_y[1];
207  memcpy(&top[1], &h->top_border_y[h->mbx * 16 + 8], 8);
208  if (h->flags & C_AVAIL)
209  memcpy(&top[9], &h->top_border_y[(h->mbx + 1) * 16], 8);
210  else
211  memset(&top[9], top[8], 9);
212  top[17] = top[16];
213  top[0] = top[1];
214  if (h->flags & B_AVAIL)
215  h->intern_border_y[0] = top[0] = h->top_border_y[h->mbx * 16 + 7];
216  break;
217  case 2:
218  *left = &h->left_border_y[8];
219  memcpy(&top[1], h->cy + 7 * h->l_stride, 16);
220  top[17] = top[16];
221  top[0] = top[1];
222  if (h->flags & A_AVAIL)
223  top[0] = h->left_border_y[8];
224  break;
225  case 3:
226  *left = &h->intern_border_y[8];
227  for (i = 0; i < 8; i++)
228  h->intern_border_y[i + 9] = *(h->cy + 7 + (i + 8) * h->l_stride);
229  memset(&h->intern_border_y[17], h->intern_border_y[16], 9);
230  memcpy(&top[0], h->cy + 7 + 7 * h->l_stride, 9);
231  memset(&top[9], top[8], 9);
232  break;
233  }
234 }
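/* Rough summary of the loads above: for each 8x8 luma block an 18-entry top[]
 * row (top-left corner, the samples above, one right-hand extension) is
 * assembled and *left is pointed at the matching left-border column; samples
 * belonging to unavailable neighbours are padded by replicating the nearest
 * available value. */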
235 
236 void ff_cavs_load_intra_pred_chroma(AVSContext *h)
237 {
238  /* extend borders by one pixel */
239  h->left_border_u[9] = h->left_border_u[8];
240  h->left_border_v[9] = h->left_border_v[8];
241  if(h->flags & C_AVAIL) {
242  h->top_border_u[h->mbx*10 + 9] = h->top_border_u[h->mbx*10 + 11];
243  h->top_border_v[h->mbx*10 + 9] = h->top_border_v[h->mbx*10 + 11];
244  } else {
245  h->top_border_u[h->mbx * 10 + 9] = h->top_border_u[h->mbx * 10 + 8];
246  h->top_border_v[h->mbx * 10 + 9] = h->top_border_v[h->mbx * 10 + 8];
247  }
248  if((h->flags & A_AVAIL) && (h->flags & B_AVAIL)) {
249  h->top_border_u[h->mbx * 10] = h->left_border_u[0] = h->topleft_border_u;
250  h->top_border_v[h->mbx * 10] = h->left_border_v[0] = h->topleft_border_v;
251  } else {
252  h->left_border_u[0] = h->left_border_u[1];
253  h->left_border_v[0] = h->left_border_v[1];
254  h->top_border_u[h->mbx * 10] = h->top_border_u[h->mbx * 10 + 1];
255  h->top_border_v[h->mbx * 10] = h->top_border_v[h->mbx * 10 + 1];
256  }
257 }
258 
259 static void intra_pred_vert(uint8_t *d, uint8_t *top, uint8_t *left, ptrdiff_t stride)
260 {
261  int y;
262  uint64_t a = AV_RN64(&top[1]);
263  for (y = 0; y < 8; y++)
264  *((uint64_t *)(d + y * stride)) = a;
265 }
266 
267 static void intra_pred_horiz(uint8_t *d, uint8_t *top, uint8_t *left, ptrdiff_t stride)
268 {
269  int y;
270  uint64_t a;
271  for (y = 0; y < 8; y++) {
272  a = left[y + 1] * 0x0101010101010101ULL;
273  *((uint64_t *)(d + y * stride)) = a;
274  }
275 }
276 
277 static void intra_pred_dc_128(uint8_t *d, uint8_t *top, uint8_t *left, ptrdiff_t stride)
278 {
279  int y;
280  uint64_t a = 0x8080808080808080ULL;
281  for (y = 0; y < 8; y++)
282  *((uint64_t *)(d + y * stride)) = a;
283 }
284 
285 static void intra_pred_plane(uint8_t *d, uint8_t *top, uint8_t *left, ptrdiff_t stride)
286 {
287  int x, y, ia;
288  int ih = 0;
289  int iv = 0;
290  const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
291 
292  for (x = 0; x < 4; x++) {
293  ih += (x + 1) * (top[5 + x] - top[3 - x]);
294  iv += (x + 1) * (left[5 + x] - left[3 - x]);
295  }
296  ia = (top[8] + left[8]) << 4;
297  ih = (17 * ih + 16) >> 5;
298  iv = (17 * iv + 16) >> 5;
299  for (y = 0; y < 8; y++)
300  for (x = 0; x < 8; x++)
301  d[y * stride + x] = cm[(ia + (x - 3) * ih + (y - 3) * iv + 16) >> 5];
302 }
303 
304 #define LOWPASS(ARRAY, INDEX) \
305  ((ARRAY[(INDEX) - 1] + 2 * ARRAY[(INDEX)] + ARRAY[(INDEX) + 1] + 2) >> 2)
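/* LOWPASS(A, i) is the (1,2,1)/4 smoothing of three neighbouring reference
 * samples; the lp, down-left, down-right, lp-left and lp-top predictors below
 * derive every output pixel from these filtered border samples. */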
306 
307 static void intra_pred_lp(uint8_t *d, uint8_t *top, uint8_t *left, ptrdiff_t stride)
308 {
309  int x, y;
310  for (y = 0; y < 8; y++)
311  for (x = 0; x < 8; x++)
312  d[y * stride + x] = (LOWPASS(top, x + 1) + LOWPASS(left, y + 1)) >> 1;
313 }
314 
315 static void intra_pred_down_left(uint8_t *d, uint8_t *top, uint8_t *left, ptrdiff_t stride)
316 {
317  int x, y;
318  for (y = 0; y < 8; y++)
319  for (x = 0; x < 8; x++)
320  d[y * stride + x] = (LOWPASS(top, x + y + 2) + LOWPASS(left, x + y + 2)) >> 1;
321 }
322 
323 static void intra_pred_down_right(uint8_t *d, uint8_t *top, uint8_t *left, ptrdiff_t stride)
324 {
325  int x, y;
326  for (y = 0; y < 8; y++)
327  for (x = 0; x < 8; x++)
328  if (x == y)
329  d[y * stride + x] = (left[1] + 2 * top[0] + top[1] + 2) >> 2;
330  else if (x > y)
331  d[y * stride + x] = LOWPASS(top, x - y);
332  else
333  d[y * stride + x] = LOWPASS(left, y - x);
334 }
335 
336 static void intra_pred_lp_left(uint8_t *d, uint8_t *top, uint8_t *left, ptrdiff_t stride)
337 {
338  int x, y;
339  for (y = 0; y < 8; y++)
340  for (x = 0; x < 8; x++)
341  d[y * stride + x] = LOWPASS(left, y + 1);
342 }
343 
344 static void intra_pred_lp_top(uint8_t *d, uint8_t *top, uint8_t *left, ptrdiff_t stride)
345 {
346  int x, y;
347  for (y = 0; y < 8; y++)
348  for (x = 0; x < 8; x++)
349  d[y * stride + x] = LOWPASS(top, x + 1);
350 }
351 
352 #undef LOWPASS
353 
354 static inline void modify_pred(const int8_t *mod_table, int *mode)
355 {
356  *mode = mod_table[*mode];
357  if (*mode < 0) {
358  av_log(NULL, AV_LOG_ERROR, "Illegal intra prediction mode\n");
359  *mode = 0;
360  }
361 }
362 
363 void ff_cavs_modify_mb_i(AVSContext *h, int *pred_mode_uv)
364 {
365  /* save pred modes before they get modified */
366  h->pred_mode_Y[3] = h->pred_mode_Y[5];
367  h->pred_mode_Y[6] = h->pred_mode_Y[8];
368  h->top_pred_Y[h->mbx * 2 + 0] = h->pred_mode_Y[7];
369  h->top_pred_Y[h->mbx * 2 + 1] = h->pred_mode_Y[8];
370 
371  /* modify pred modes according to availability of neighbour samples */
372  if (!(h->flags & A_AVAIL)) {
373  modify_pred(left_modifier_l, &h->pred_mode_Y[4]);
374  modify_pred(left_modifier_l, &h->pred_mode_Y[7]);
375  modify_pred(left_modifier_c, pred_mode_uv);
376  }
377  if (!(h->flags & B_AVAIL)) {
378  modify_pred(top_modifier_l, &h->pred_mode_Y[4]);
379  modify_pred(top_modifier_l, &h->pred_mode_Y[5]);
380  modify_pred(top_modifier_c, pred_mode_uv);
381  }
382 }
383 
384 /*****************************************************************************
385  *
386  * motion compensation
387  *
388  ****************************************************************************/
389 
390 static inline void mc_dir_part(AVSContext *h, AVFrame *pic, int chroma_height,
391  int delta, int list, uint8_t *dest_y,
392  uint8_t *dest_cb, uint8_t *dest_cr,
393  int src_x_offset, int src_y_offset,
394  qpel_mc_func *qpix_op,
395  h264_chroma_mc_func chroma_op, cavs_vector *mv)
396 {
397  const int mx = mv->x + src_x_offset * 8;
398  const int my = mv->y + src_y_offset * 8;
399  const int luma_xy = (mx & 3) + ((my & 3) << 2);
400  uint8_t *src_y = pic->data[0] + (mx >> 2) + (my >> 2) * h->l_stride;
401  uint8_t *src_cb = pic->data[1] + (mx >> 3) + (my >> 3) * h->c_stride;
402  uint8_t *src_cr = pic->data[2] + (mx >> 3) + (my >> 3) * h->c_stride;
403  int extra_width = 0;
404  int extra_height = extra_width;
405  const int full_mx = mx >> 2;
406  const int full_my = my >> 2;
407  const int pic_width = 16 * h->mb_width;
408  const int pic_height = 16 * h->mb_height;
409  int emu = 0;
410 
411  if (!pic->data[0])
412  return;
413  if (mx & 7)
414  extra_width -= 3;
415  if (my & 7)
416  extra_height -= 3;
417 
418  if (full_mx < 0 - extra_width ||
419  full_my < 0 - extra_height ||
420  full_mx + 16 /* FIXME */ > pic_width + extra_width ||
421  full_my + 16 /* FIXME */ > pic_height + extra_height) {
422  h->vdsp.emulated_edge_mc(h->edge_emu_buffer,
423  src_y - 2 - 2 * h->l_stride,
424  h->l_stride, h->l_stride,
425  16 + 5, 16 + 5 /* FIXME */,
426  full_mx - 2, full_my - 2,
427  pic_width, pic_height);
428  src_y = h->edge_emu_buffer + 2 + 2 * h->l_stride;
429  emu = 1;
430  }
431 
432  // FIXME try variable height perhaps?
433  qpix_op[luma_xy](dest_y, src_y, h->l_stride);
434 
435  if (emu) {
436  h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cb,
437  h->c_stride, h->c_stride,
438  9, 9 /* FIXME */,
439  mx >> 3, my >> 3,
440  pic_width >> 1, pic_height >> 1);
441  src_cb = h->edge_emu_buffer;
442  }
443  chroma_op(dest_cb, src_cb, h->c_stride, chroma_height, mx & 7, my & 7);
444 
445  if (emu) {
446  h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cr,
447  h->c_stride, h->c_stride,
448  9, 9 /* FIXME */,
449  mx >> 3, my >> 3,
450  pic_width >> 1, pic_height >> 1);
451  src_cr = h->edge_emu_buffer;
452  }
453  chroma_op(dest_cr, src_cr, h->c_stride, chroma_height, mx & 7, my & 7);
454 }
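/* mv->x/mv->y are in quarter-pel luma units: luma_xy selects one of the 16
 * quarter-pel interpolation functions, while chroma uses the coarser mx >> 3,
 * my >> 3 position together with the (mx & 7, my & 7) fraction. If the
 * referenced block reaches outside the picture, emulated_edge_mc() first
 * copies a padded version into edge_emu_buffer and interpolation runs on that
 * copy. */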
455 
456 static inline void mc_part_std(AVSContext *h, int chroma_height, int delta,
457  uint8_t *dest_y,
458  uint8_t *dest_cb,
459  uint8_t *dest_cr,
460  int x_offset, int y_offset,
461  qpel_mc_func *qpix_put,
462  h264_chroma_mc_func chroma_put,
463  qpel_mc_func *qpix_avg,
464  h264_chroma_mc_func chroma_avg,
465  cavs_vector *mv)
466 {
467  qpel_mc_func *qpix_op = qpix_put;
468  h264_chroma_mc_func chroma_op = chroma_put;
469 
470  dest_y += x_offset * 2 + y_offset * h->l_stride * 2;
471  dest_cb += x_offset + y_offset * h->c_stride;
472  dest_cr += x_offset + y_offset * h->c_stride;
473  x_offset += 8 * h->mbx;
474  y_offset += 8 * h->mby;
475 
476  if (mv->ref >= 0) {
477  AVFrame *ref = h->DPB[mv->ref].f;
478  mc_dir_part(h, ref, chroma_height, delta, 0,
479  dest_y, dest_cb, dest_cr, x_offset, y_offset,
480  qpix_op, chroma_op, mv);
481 
482  qpix_op = qpix_avg;
483  chroma_op = chroma_avg;
484  }
485 
486  if ((mv + MV_BWD_OFFS)->ref >= 0) {
487  AVFrame *ref = h->DPB[0].f;
488  mc_dir_part(h, ref, chroma_height, delta, 1,
489  dest_y, dest_cb, dest_cr, x_offset, y_offset,
490  qpix_op, chroma_op, mv + MV_BWD_OFFS);
491  }
492 }
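/* The first (forward) prediction is written with the put_ functions; if a
 * backward vector is present as well, qpix_op/chroma_op are switched to the
 * avg_ variants so the second prediction is averaged into the block. The
 * backward prediction always reads from DPB[0]. */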
493 
494 void ff_cavs_inter(AVSContext *h, enum cavs_mb mb_type)
495 {
496  if (ff_cavs_partition_flags[mb_type] == 0) { // 16x16
497  mc_part_std(h, 8, 0, h->cy, h->cu, h->cv, 0, 0,
498  h->cdsp.put_cavs_qpel_pixels_tab[0],
499  h->h264chroma.put_h264_chroma_pixels_tab[0],
500  h->cdsp.avg_cavs_qpel_pixels_tab[0],
501  h->h264chroma.avg_h264_chroma_pixels_tab[0],
502  &h->mv[MV_FWD_X0]);
503  } else {
504  mc_part_std(h, 4, 0, h->cy, h->cu, h->cv, 0, 0,
505  h->cdsp.put_cavs_qpel_pixels_tab[1],
506  h->h264chroma.put_h264_chroma_pixels_tab[1],
507  h->cdsp.avg_cavs_qpel_pixels_tab[1],
508  h->h264chroma.avg_h264_chroma_pixels_tab[1],
509  &h->mv[MV_FWD_X0]);
510  mc_part_std(h, 4, 0, h->cy, h->cu, h->cv, 4, 0,
511  h->cdsp.put_cavs_qpel_pixels_tab[1],
512  h->h264chroma.put_h264_chroma_pixels_tab[1],
513  h->cdsp.avg_cavs_qpel_pixels_tab[1],
514  h->h264chroma.avg_h264_chroma_pixels_tab[1],
515  &h->mv[MV_FWD_X1]);
516  mc_part_std(h, 4, 0, h->cy, h->cu, h->cv, 0, 4,
517  h->cdsp.put_cavs_qpel_pixels_tab[1],
518  h->h264chroma.put_h264_chroma_pixels_tab[1],
519  h->cdsp.avg_cavs_qpel_pixels_tab[1],
520  h->h264chroma.avg_h264_chroma_pixels_tab[1],
521  &h->mv[MV_FWD_X2]);
522  mc_part_std(h, 4, 0, h->cy, h->cu, h->cv, 4, 4,
523  h->cdsp.put_cavs_qpel_pixels_tab[1],
524  h->h264chroma.put_h264_chroma_pixels_tab[1],
525  h->cdsp.avg_cavs_qpel_pixels_tab[1],
526  h->h264chroma.avg_h264_chroma_pixels_tab[1],
527  &h->mv[MV_FWD_X3]);
528  }
529 }
530 
531 /*****************************************************************************
532  *
533  * motion vector prediction
534  *
535  ****************************************************************************/
536 
537 static inline void scale_mv(AVSContext *h, int *d_x, int *d_y,
538  cavs_vector *src, int distp)
539 {
540  int64_t den = h->scale_den[FFMAX(src->ref, 0)];
541  *d_x = (src->x * distp * den + 256 + FF_SIGNBIT(src->x)) >> 9;
542  *d_y = (src->y * distp * den + 256 + FF_SIGNBIT(src->y)) >> 9;
543 }
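/* Each candidate is rescaled to the current block's temporal distance,
 * roughly d = (v * distp * scale_den[ref] + 256 + FF_SIGNBIT(v)) >> 9, where
 * FF_SIGNBIT keeps the rounding symmetric for negative components. This
 * assumes scale_den[] holds 512 / dist for the candidate's reference picture,
 * which is set up outside this file when the headers are parsed. */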
544 
545 static inline void mv_pred_median(AVSContext *h,
546  cavs_vector *mvP,
547  cavs_vector *mvA,
548  cavs_vector *mvB,
549  cavs_vector *mvC)
550 {
551  int ax, ay, bx, by, cx, cy;
552  int len_ab, len_bc, len_ca, len_mid;
553 
554  /* scale candidates according to their temporal span */
555  scale_mv(h, &ax, &ay, mvA, mvP->dist);
556  scale_mv(h, &bx, &by, mvB, mvP->dist);
557  scale_mv(h, &cx, &cy, mvC, mvP->dist);
558  /* find the geometrical median of the three candidates */
559  len_ab = abs(ax - bx) + abs(ay - by);
560  len_bc = abs(bx - cx) + abs(by - cy);
561  len_ca = abs(cx - ax) + abs(cy - ay);
562  len_mid = mid_pred(len_ab, len_bc, len_ca);
563  if (len_mid == len_ab) {
564  mvP->x = cx;
565  mvP->y = cy;
566  } else if (len_mid == len_bc) {
567  mvP->x = ax;
568  mvP->y = ay;
569  } else {
570  mvP->x = bx;
571  mvP->y = by;
572  }
573 }
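/* Median selection above: the pairwise L1 distances between the three scaled
 * candidates are compared and the candidate that is not part of the pair with
 * the median distance is chosen (e.g. if |A-B| is the median, C is used). */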
574 
575 void ff_cavs_mv(AVSContext *h, enum cavs_mv_loc nP, enum cavs_mv_loc nC,
576  enum cavs_mv_pred mode, enum cavs_block size, int ref)
577 {
578  cavs_vector *mvP = &h->mv[nP];
579  cavs_vector *mvA = &h->mv[nP-1];
580  cavs_vector *mvB = &h->mv[nP-4];
581  cavs_vector *mvC = &h->mv[nC];
582  const cavs_vector *mvP2 = NULL;
583 
584  mvP->ref = ref;
585  mvP->dist = h->dist[mvP->ref];
586  if (mvC->ref == NOT_AVAIL || (nP == MV_FWD_X3) || (nP == MV_BWD_X3 ))
587  mvC = &h->mv[nP - 5]; // set to top-left (mvD)
588  if (mode == MV_PRED_PSKIP &&
589  (mvA->ref == NOT_AVAIL ||
590  mvB->ref == NOT_AVAIL ||
591  (mvA->x | mvA->y | mvA->ref) == 0 ||
592  (mvB->x | mvB->y | mvB->ref) == 0)) {
593  mvP2 = &un_mv;
594  /* if there is only one suitable candidate, take it */
595  } else if (mvA->ref >= 0 && mvB->ref < 0 && mvC->ref < 0) {
596  mvP2 = mvA;
597  } else if (mvA->ref < 0 && mvB->ref >= 0 && mvC->ref < 0) {
598  mvP2 = mvB;
599  } else if (mvA->ref < 0 && mvB->ref < 0 && mvC->ref >= 0) {
600  mvP2 = mvC;
601  } else if (mode == MV_PRED_LEFT && mvA->ref == ref) {
602  mvP2 = mvA;
603  } else if (mode == MV_PRED_TOP && mvB->ref == ref) {
604  mvP2 = mvB;
605  } else if (mode == MV_PRED_TOPRIGHT && mvC->ref == ref) {
606  mvP2 = mvC;
607  }
608  if (mvP2) {
609  mvP->x = mvP2->x;
610  mvP->y = mvP2->y;
611  } else
612  mv_pred_median(h, mvP, mvA, mvB, mvC);
613 
614  if (mode < MV_PRED_PSKIP) {
615  int mx = get_se_golomb(&h->gb) + (unsigned)mvP->x;
616  int my = get_se_golomb(&h->gb) + (unsigned)mvP->y;
617 
618  if (mx != (int16_t)mx || my != (int16_t)my) {
619  av_log(h->avctx, AV_LOG_ERROR, "MV %d %d out of supported range\n", mx, my);
620  } else {
621  mvP->x = mx;
622  mvP->y = my;
623  }
624  }
625  set_mvs(mvP, size);
626 }
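/* Candidate layout used above: mvA (nP-1) is the block to the left, mvB
 * (nP-4) the block above and mvC (nC) the block above-right, falling back to
 * the above-left neighbour (nP-5) when C is unavailable or for the X3
 * sub-blocks. For explicitly coded vectors (mode < MV_PRED_PSKIP) the signed
 * Exp-Golomb residual is added to the prediction and range-checked against
 * int16_t before being stored. */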
627 
628 /*****************************************************************************
629  *
630  * macroblock level
631  *
632  ****************************************************************************/
633 
634 /**
635  * initialise predictors for motion vectors and intra prediction
636  */
637 void ff_cavs_init_mb(AVSContext *h)
638 {
639  int i;
640 
641  /* copy predictors from top line (MB B and C) into cache */
642  for (i = 0; i < 3; i++) {
643  h->mv[MV_FWD_B2 + i] = h->top_mv[0][h->mbx * 2 + i];
644  h->mv[MV_BWD_B2 + i] = h->top_mv[1][h->mbx * 2 + i];
645  }
646  h->pred_mode_Y[1] = h->top_pred_Y[h->mbx * 2 + 0];
647  h->pred_mode_Y[2] = h->top_pred_Y[h->mbx * 2 + 1];
648  /* clear top predictors if MB B is not available */
649  if (!(h->flags & B_AVAIL)) {
650  h->mv[MV_FWD_B2] = un_mv;
651  h->mv[MV_FWD_B3] = un_mv;
652  h->mv[MV_BWD_B2] = un_mv;
653  h->mv[MV_BWD_B3] = un_mv;
654  h->pred_mode_Y[1] = h->pred_mode_Y[2] = NOT_AVAIL;
655  h->flags &= ~(C_AVAIL | D_AVAIL);
656  } else if (h->mbx) {
657  h->flags |= D_AVAIL;
658  }
659  if (h->mbx == h->mb_width - 1) // MB C not available
660  h->flags &= ~C_AVAIL;
661  /* clear top-right predictors if MB C is not available */
662  if (!(h->flags & C_AVAIL)) {
663  h->mv[MV_FWD_C2] = un_mv;
664  h->mv[MV_BWD_C2] = un_mv;
665  }
666  /* clear top-left predictors if MB D is not available */
667  if (!(h->flags & D_AVAIL)) {
668  h->mv[MV_FWD_D3] = un_mv;
669  h->mv[MV_BWD_D3] = un_mv;
670  }
671 }
672 
673 /**
674  * save predictors for later macroblocks and increase
675  * macroblock address
676  * @return 0 if end of frame is reached, 1 otherwise
677  */
678 int ff_cavs_next_mb(AVSContext *h)
679 {
680  int i;
681 
682  h->flags |= A_AVAIL;
683  h->cy += 16;
684  h->cu += 8;
685  h->cv += 8;
686  /* copy mvs as predictors to the left */
687  for (i = 0; i <= 20; i += 4)
688  h->mv[i] = h->mv[i + 2];
689  /* copy bottom mvs from cache to top line */
690  h->top_mv[0][h->mbx * 2 + 0] = h->mv[MV_FWD_X2];
691  h->top_mv[0][h->mbx * 2 + 1] = h->mv[MV_FWD_X3];
692  h->top_mv[1][h->mbx * 2 + 0] = h->mv[MV_BWD_X2];
693  h->top_mv[1][h->mbx * 2 + 1] = h->mv[MV_BWD_X3];
694  /* next MB address */
695  h->mbidx++;
696  h->mbx++;
697  if (h->mbx == h->mb_width) { // New mb line
698  h->flags = B_AVAIL | C_AVAIL;
699  /* clear left pred_modes */
700  h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL;
701  /* clear left mv predictors */
702  for (i = 0; i <= 20; i += 4)
703  h->mv[i] = un_mv;
704  h->mbx = 0;
705  h->mby++;
706  /* re-calculate sample pointers */
707  h->cy = h->cur.f->data[0] + h->mby * 16 * h->l_stride;
708  h->cu = h->cur.f->data[1] + h->mby * 8 * h->c_stride;
709  h->cv = h->cur.f->data[2] + h->mby * 8 * h->c_stride;
710  if (h->mby == h->mb_height) { // Frame end
711  return 0;
712  }
713  }
714  return 1;
715 }
716 
717 /*****************************************************************************
718  *
719  * frame level
720  *
721  ****************************************************************************/
722 
724 {
725  int i;
726 
727  /* clear some predictors */
728  for (i = 0; i <= 20; i += 4)
729  h->mv[i] = un_mv;
730  h->mv[MV_BWD_X0] = ff_cavs_dir_mv;
731  set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
732  h->mv[MV_FWD_X0] = ff_cavs_dir_mv;
733  set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
734  h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL;
735  h->cy = h->cur.f->data[0];
736  h->cu = h->cur.f->data[1];
737  h->cv = h->cur.f->data[2];
738  h->l_stride = h->cur.f->linesize[0];
739  h->c_stride = h->cur.f->linesize[1];
740  h->luma_scan[2] = 8 * h->l_stride;
741  h->luma_scan[3] = 8 * h->l_stride + 8;
742  h->mbx = h->mby = h->mbidx = 0;
743  h->flags = 0;
744 
745  return 0;
746 }
747 
748 /*****************************************************************************
749  *
750  * headers and interface
751  *
752  ****************************************************************************/
753 
754 /**
755  * some predictions require data from the top-neighbouring macroblock.
756  * this data has to be stored for one complete row of macroblocks
757  * and this storage space is allocated here
758  */
759 int ff_cavs_init_top_lines(AVSContext *h)
760 {
761  /* alloc top line of predictors */
762  h->top_qp = av_mallocz(h->mb_width);
763  h->top_mv[0] = av_mallocz_array(h->mb_width * 2 + 1, sizeof(cavs_vector));
764  h->top_mv[1] = av_mallocz_array(h->mb_width * 2 + 1, sizeof(cavs_vector));
765  h->top_pred_Y = av_mallocz_array(h->mb_width * 2, sizeof(*h->top_pred_Y));
766  h->top_border_y = av_mallocz_array(h->mb_width + 1, 16);
767  h->top_border_u = av_mallocz_array(h->mb_width, 10);
768  h->top_border_v = av_mallocz_array(h->mb_width, 10);
769 
770  /* alloc space for co-located MVs and types */
771  h->col_mv = av_mallocz_array(h->mb_width * h->mb_height,
772  4 * sizeof(cavs_vector));
773  h->col_type_base = av_mallocz(h->mb_width * h->mb_height);
774  h->block = av_mallocz(64 * sizeof(int16_t));
775 
776  if (!h->top_qp || !h->top_mv[0] || !h->top_mv[1] || !h->top_pred_Y ||
777  !h->top_border_y || !h->top_border_u || !h->top_border_v ||
778  !h->col_mv || !h->col_type_base || !h->block) {
779  av_freep(&h->top_qp);
780  av_freep(&h->top_mv[0]);
781  av_freep(&h->top_mv[1]);
782  av_freep(&h->top_pred_Y);
783  av_freep(&h->top_border_y);
784  av_freep(&h->top_border_u);
785  av_freep(&h->top_border_v);
786  av_freep(&h->col_mv);
787  av_freep(&h->col_type_base);
788  av_freep(&h->block);
789  return AVERROR(ENOMEM);
790  }
791  return 0;
792 }
793 
794 av_cold int ff_cavs_init(AVCodecContext *avctx)
795 {
796  AVSContext *h = avctx->priv_data;
797 
798  ff_blockdsp_init(&h->bdsp, avctx);
799  ff_h264chroma_init(&h->h264chroma, 8);
800  ff_idctdsp_init(&h->idsp, avctx);
801  ff_videodsp_init(&h->vdsp, 8);
802  ff_cavsdsp_init(&h->cdsp, avctx);
803  ff_init_scantable_permutation(h->idsp.idct_permutation,
804  h->cdsp.idct_perm);
805  ff_init_scantable(h->idsp.idct_permutation, &h->scantable, ff_zigzag_direct);
806 
807  h->avctx = avctx;
808  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
809 
810  h->cur.f = av_frame_alloc();
811  h->DPB[0].f = av_frame_alloc();
812  h->DPB[1].f = av_frame_alloc();
813  if (!h->cur.f || !h->DPB[0].f || !h->DPB[1].f) {
814  ff_cavs_end(avctx);
815  return AVERROR(ENOMEM);
816  }
817 
818  h->luma_scan[0] = 0;
819  h->luma_scan[1] = 8;
820  h->intra_pred_l[INTRA_L_VERT] = intra_pred_vert;
821  h->intra_pred_l[INTRA_L_HORIZ] = intra_pred_horiz;
822  h->intra_pred_l[INTRA_L_LP] = intra_pred_lp;
823  h->intra_pred_l[INTRA_L_DOWN_LEFT] = intra_pred_down_left;
824  h->intra_pred_l[INTRA_L_DOWN_RIGHT] = intra_pred_down_right;
825  h->intra_pred_l[INTRA_L_LP_LEFT] = intra_pred_lp_left;
826  h->intra_pred_l[INTRA_L_LP_TOP] = intra_pred_lp_top;
827  h->intra_pred_l[INTRA_L_DC_128] = intra_pred_dc_128;
828  h->intra_pred_c[INTRA_C_LP] = intra_pred_lp;
829  h->intra_pred_c[INTRA_C_HORIZ] = intra_pred_horiz;
830  h->intra_pred_c[INTRA_C_VERT] = intra_pred_vert;
831  h->intra_pred_c[INTRA_C_PLANE] = intra_pred_plane;
832  h->intra_pred_c[INTRA_C_LP_LEFT] = intra_pred_lp_left;
833  h->intra_pred_c[INTRA_C_LP_TOP] = intra_pred_lp_top;
834  h->intra_pred_c[INTRA_C_DC_128] = intra_pred_dc_128;
835  h->mv[7] = un_mv;
836  h->mv[19] = un_mv;
837  return 0;
838 }
839 
840 av_cold int ff_cavs_end(AVCodecContext *avctx)
841 {
842  AVSContext *h = avctx->priv_data;
843 
844  av_frame_free(&h->cur.f);
845  av_frame_free(&h->DPB[0].f);
846  av_frame_free(&h->DPB[1].f);
847 
848  av_freep(&h->top_qp);
849  av_freep(&h->top_mv[0]);
850  av_freep(&h->top_mv[1]);
851  av_freep(&h->top_pred_Y);
852  av_freep(&h->top_border_y);
853  av_freep(&h->top_border_u);
854  av_freep(&h->top_border_v);
855  av_freep(&h->col_mv);
856  av_freep(&h->col_type_base);
857  av_freep(&h->block);
858  av_freep(&h->edge_emu_buffer);
859  return 0;
860 }