FFmpeg
rv34.c
Go to the documentation of this file.
1 /*
2  * RV30/40 decoder common data
3  * Copyright (c) 2007 Mike Melanson, Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * RV30/40 decoder common data
25  */
26 
27 #include "libavutil/avassert.h"
28 #include "libavutil/imgutils.h"
29 #include "libavutil/internal.h"
30 #include "libavutil/mem.h"
31 #include "libavutil/mem_internal.h"
32 #include "libavutil/thread.h"
33 
34 #include "avcodec.h"
35 #include "decode.h"
36 #include "error_resilience.h"
37 #include "mpegutils.h"
38 #include "mpegvideo.h"
39 #include "mpegvideodec.h"
40 #include "golomb.h"
41 #include "mathops.h"
42 #include "mpeg_er.h"
43 #include "qpeldsp.h"
44 #include "rectangle.h"
45 #include "thread.h"
46 #include "threadframe.h"
47 
48 #include "rv34vlc.h"
49 #include "rv34data.h"
50 #include "rv34.h"
51 
/**
 * Clear an 8x2 motion-vector area: two 4-byte-wide, 2-row-high rectangles
 * side by side.
 */
static inline void ZERO8x2(void* dst, int stride)
{
    uint8_t *p = dst;

    fill_rectangle(p,     1, 2, stride, 0, 4);
    fill_rectangle(p + 4, 1, 2, stride, 0, 4);
}
57 
/** translation of RV30/40 macroblock types to lavc ones */
static const int rv34_mb_type_to_lavc[12] = {
/* NOTE(review): the 12 initializer entries (original lines 60-71) are
 * missing from this extraction -- restore them from the original rv34.c
 * before building. */
};
73 
74 
76 
77 static int rv34_decode_mv(RV34DecContext *r, int block_type);
78 
79 /**
80  * @name RV30/40 VLC generating functions
81  * @{
82  */
83 
/* Backing storage shared by all statically initialized RV30/40 VLC tables;
 * rv34_gen_vlc_ext() hands out consecutive slices of it via *offset. */
static VLCElem table_data[117592];
85 
/**
 * Generate VLC from codeword lengths.
 * @param bits   codeword lengths (zeroes are accepted)
 * @param size   length of input data
 * @param vlc    output VLC
 * @param syms   symbols for input codes (NULL for default ones)
 * @param offset in/out: current position inside the shared static
 *               table_data[] storage; advanced by the size of the new table
 */
static av_cold void rv34_gen_vlc_ext(const uint8_t *bits, int size, VLC *vlc,
                                     const uint8_t *syms, int *offset)
{
    int counts[17] = {0}, codes[17];
    uint16_t cw[MAX_VLC_SIZE];
    int maxbits;

    /* histogram of code lengths */
    for (int i = 0; i < size; i++)
        counts[bits[i]]++;

    /* bits[0] is zero for some tables, i.e. syms actually starts at 1.
     * So we reset it here. The code assigned to this element is 0x00. */
    codes[0] = counts[0] = 0;
    for (int i = 0; i < 16; i++) {
        /* canonical Huffman: first code of length i+1 follows the codes of
         * length i, shifted one bit */
        codes[i+1] = (codes[i] + counts[i]) << 1;
        if (counts[i])
            maxbits = i;
        /* NOTE(review): maxbits stays uninitialized if every count is zero;
         * presumably all RV34 tables contain at least one code -- confirm. */
    }
    /* assign codewords in input order */
    for (int i = 0; i < size; i++)
        cw[i] = codes[bits[i]]++;

    vlc->table = &table_data[*offset];
    /* NOTE(review): original line 116 (presumably setting
     * vlc->table_allocated) is missing from this extraction. */
    ff_vlc_init_sparse(vlc, FFMIN(maxbits, 9), size,
                       bits, 1, 1,
                       cw, 2, 2,
                       syms, !!syms, !!syms, VLC_INIT_STATIC_OVERLONG);
    *offset += vlc->table_size;
}
123 
/* Convenience wrapper around rv34_gen_vlc_ext() for tables that use the
 * default symbols; stores only the resulting VLCElem table pointer. */
static av_cold void rv34_gen_vlc(const uint8_t *bits, int size, const VLCElem **vlcp,
                                 int *offset)
{
    VLC vlc = { 0 };
    /* NOTE(review): the call that actually builds the table (original line
     * 128, presumably rv34_gen_vlc_ext(bits, size, &vlc, NULL, offset);)
     * is missing from this extraction. */
    *vlcp = vlc.table;
}
131 
/**
 * Initialize all tables.
 *
 * Builds every intra and inter VLC set into the shared static storage;
 * 'offset' tracks the next free slot in table_data[].
 */
static av_cold void rv34_init_tables(void)
{
    int i, j, k, offset = 0;

    /* NOTE(review): every rv34_gen_vlc()/rv34_gen_vlc_ext() call below lost
     * its leading line(s) in this extraction -- only the trailing argument
     * lines remain.  Restore the call lines from the original rv34.c. */
    for(i = 0; i < NUM_INTRA_TABLES; i++){
        for(j = 0; j < 2; j++){
                         &intra_vlcs[i].cbppattern[j], &offset);
                         &intra_vlcs[i].second_pattern[j], &offset);
                         &intra_vlcs[i].third_pattern[j], &offset);
            for(k = 0; k < 4; k++){
                             &intra_vlcs[i].cbp[j][k], rv34_cbp_code, &offset);
            }
        }
        for(j = 0; j < 4; j++){
                         &intra_vlcs[i].first_pattern[j], &offset);
        }
                     &intra_vlcs[i].coefficient, &offset);
    }

    for(i = 0; i < NUM_INTER_TABLES; i++){
                     &inter_vlcs[i].cbppattern[0], &offset);
        for(j = 0; j < 4; j++){
                         &inter_vlcs[i].cbp[0][j], rv34_cbp_code, &offset);
        }
        for(j = 0; j < 2; j++){
                         &inter_vlcs[i].first_pattern[j], &offset);
                         &inter_vlcs[i].second_pattern[j], &offset);
                         &inter_vlcs[i].third_pattern[j], &offset);
        }
                     &inter_vlcs[i].coefficient, &offset);
    }
}
179 
180 /** @} */ // vlc group
181 
182 /**
183  * @name RV30/40 4x4 block decoding functions
184  * @{
185  */
186 
187 /**
188  * Decode coded block pattern.
189  */
190 static int rv34_decode_cbp(GetBitContext *gb, const RV34VLC *vlc, int table)
191 {
192  int pattern, code, cbp=0;
193  int ones;
194  static const int cbp_masks[3] = {0x100000, 0x010000, 0x110000};
195  static const int shifts[4] = { 0, 2, 8, 10 };
196  const int *curshift = shifts;
197  int i, t, mask;
198 
199  code = get_vlc2(gb, vlc->cbppattern[table], 9, 2);
200  pattern = code & 0xF;
201  code >>= 4;
202 
203  ones = rv34_count_ones[pattern];
204 
205  for(mask = 8; mask; mask >>= 1, curshift++){
206  if(pattern & mask)
207  cbp |= get_vlc2(gb, vlc->cbp[table][ones].table, vlc->cbp[table][ones].bits, 1) << curshift[0];
208  }
209 
210  for(i = 0; i < 4; i++){
211  t = (modulo_three_table[code] >> (6 - 2*i)) & 3;
212  if(t == 1)
213  cbp |= cbp_masks[get_bits1(gb)] << i;
214  if(t == 2)
215  cbp |= cbp_masks[2] << i;
216  }
217  return cbp;
218 }
219 
220 /**
221  * Get one coefficient value from the bitstream and store it.
222  */
223 static inline void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb,
224  const VLCElem *vlc, int q)
225 {
226  if(coef){
227  if(coef == esc){
228  coef = get_vlc2(gb, vlc, 9, 2);
229  if(coef > 23){
230  coef -= 23;
231  coef = 22 + ((1 << coef) | get_bits(gb, coef));
232  }
233  coef += esc;
234  }
235  if(get_bits1(gb))
236  coef = -coef;
237  *dst = (coef*q + 8) >> 4;
238  }
239 }
240 
/**
 * Decode 2x2 subblock of coefficients.
 *
 * The four 2-bit fields of 'flags' select how each coefficient is coded;
 * is_block2 swaps the scan order of the two off-diagonal coefficients.
 */
static inline void decode_subblock(int16_t *dst, int code, const int is_block2,
                                   GetBitContext *gb, const VLCElem *vlc, int q)
{
    /* NOTE(review): the declaration of 'flags' (original line 247, presumably
     * int flags = modulo_three_table[code];) is missing from this
     * extraction. */

    decode_coeff( dst+0*4+0, (flags >> 6)    , 3, gb, vlc, q);
    if(is_block2){
        decode_coeff(dst+1*4+0, (flags >> 4) & 3, 2, gb, vlc, q);
        decode_coeff(dst+0*4+1, (flags >> 2) & 3, 2, gb, vlc, q);
    }else{
        decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q);
        decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q);
    }
    decode_coeff( dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q);
}
259 
260 /**
261  * Decode a single coefficient.
262  */
263 static inline void decode_subblock1(int16_t *dst, int code, GetBitContext *gb,
264  const VLCElem *vlc, int q)
265 {
266  int coeff = modulo_three_table[code] >> 6;
267  decode_coeff(dst, coeff, 3, gb, vlc, q);
268 }
269 
/* Decode the top-left 2x2 subblock with separate quantizers for the DC,
 * the two first-order AC and the second-order AC coefficient. */
static inline void decode_subblock3(int16_t *dst, int code, GetBitContext *gb,
                                    const VLCElem *vlc,
                                    int q_dc, int q_ac1, int q_ac2)
{
    /* NOTE(review): the declaration of 'flags' (original line 274, presumably
     * int flags = modulo_three_table[code];) is missing from this
     * extraction. */

    decode_coeff(dst+0*4+0, (flags >> 6)    , 3, gb, vlc, q_dc);
    decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q_ac1);
    decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q_ac1);
    decode_coeff(dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q_ac2);
}
281 
282 /**
283  * Decode coefficients for 4x4 block.
284  *
285  * This is done by filling 2x2 subblocks with decoded coefficients
286  * in this order (the same for subblocks and subblock coefficients):
287  * o--o
288  * /
289  * /
290  * o--o
291  */
292 
293 static int rv34_decode_block(int16_t *dst, GetBitContext *gb, const RV34VLC *rvlc,
294  int fc, int sc, int q_dc, int q_ac1, int q_ac2)
295 {
296  int code, pattern, has_ac = 1;
297 
298  code = get_vlc2(gb, rvlc->first_pattern[fc], 9, 2);
299 
300  pattern = code & 0x7;
301 
302  code >>= 3;
303 
304  if (modulo_three_table[code] & 0x3F) {
305  decode_subblock3(dst, code, gb, rvlc->coefficient, q_dc, q_ac1, q_ac2);
306  } else {
307  decode_subblock1(dst, code, gb, rvlc->coefficient, q_dc);
308  if (!pattern)
309  return 0;
310  has_ac = 0;
311  }
312 
313  if(pattern & 4){
314  code = get_vlc2(gb, rvlc->second_pattern[sc], 9, 2);
315  decode_subblock(dst + 4*0+2, code, 0, gb, rvlc->coefficient, q_ac2);
316  }
317  if(pattern & 2){ // Looks like coefficients 1 and 2 are swapped for this block
318  code = get_vlc2(gb, rvlc->second_pattern[sc], 9, 2);
319  decode_subblock(dst + 4*2+0, code, 1, gb, rvlc->coefficient, q_ac2);
320  }
321  if(pattern & 1){
322  code = get_vlc2(gb, rvlc->third_pattern[sc], 9, 2);
323  decode_subblock(dst + 4*2+2, code, 0, gb, rvlc->coefficient, q_ac2);
324  }
325  return has_ac | pattern;
326 }
327 
328 /**
329  * @name RV30/40 bitstream parsing
330  * @{
331  */
332 
/**
 * Decode starting slice position.
 * @todo Maybe replace with ff_h263_decode_mba() ?
 */
/* NOTE(review): the function signature (original line 337) is missing from
 * this extraction -- presumably int ff_rv34_get_start_offset(GetBitContext
 * *gb, int mb_size); confirm against the original rv34.c. */
{
    int i;
    /* pick the smallest size class whose maximum MB count covers mb_size */
    for(i = 0; i < 5; i++)
        if(rv34_mb_max_sizes[i] >= mb_size - 1)
            break;
    return rv34_mb_bits_sizes[i];
}
345 
/**
 * Select VLC set for decoding from current quantizer, modifier and frame type.
 *
 * The modifier biases the quantizer upwards (capped at 31) before the
 * lookup.
 */
static inline RV34VLC* choose_vlc_set(int quant, int mod, int type)
{
    if(mod == 2 && quant < 19) quant += 10;
    else if(mod && quant < 26) quant += 5;
    av_assert2(quant >= 0 && quant < 32);
    /* NOTE(review): the return statement (original lines 354-355, presumably
     * selecting between the inter and intra VLC tables indexed by the
     * adjusted quantizer, depending on 'type') is missing from this
     * extraction. */
}
357 
358 /**
359  * Decode intra macroblock header and return CBP in case of success, -1 otherwise.
360  */
361 static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
362 {
363  MpegEncContext *s = &r->s;
364  GetBitContext *gb = &s->gb;
365  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
366  int t;
367 
368  r->is16 = get_bits1(gb);
369  if(r->is16){
370  s->current_picture_ptr->mb_type[mb_pos] = MB_TYPE_INTRA16x16;
371  r->block_type = RV34_MB_TYPE_INTRA16x16;
372  t = get_bits(gb, 2);
373  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
374  r->luma_vlc = 2;
375  }else{
376  if(!r->rv30){
377  if(!get_bits1(gb))
378  av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n");
379  }
380  s->current_picture_ptr->mb_type[mb_pos] = MB_TYPE_INTRA;
381  r->block_type = RV34_MB_TYPE_INTRA;
382  if(r->decode_intra_types(r, gb, intra_types) < 0)
383  return -1;
384  r->luma_vlc = 1;
385  }
386 
387  r->chroma_vlc = 0;
388  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
389 
390  return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
391 }
392 
393 /**
394  * Decode inter macroblock header and return CBP in case of success, -1 otherwise.
395  */
396 static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
397 {
398  MpegEncContext *s = &r->s;
399  GetBitContext *gb = &s->gb;
400  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
401  int i, t;
402 
403  r->block_type = r->decode_mb_info(r);
404  if(r->block_type == -1)
405  return -1;
406  s->current_picture_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
407  r->mb_type[mb_pos] = r->block_type;
408  if(r->block_type == RV34_MB_SKIP){
409  if(s->pict_type == AV_PICTURE_TYPE_P)
410  r->mb_type[mb_pos] = RV34_MB_P_16x16;
411  if(s->pict_type == AV_PICTURE_TYPE_B)
412  r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
413  }
414  r->is16 = !!IS_INTRA16x16(s->current_picture_ptr->mb_type[mb_pos]);
415  if (rv34_decode_mv(r, r->block_type) < 0)
416  return -1;
417  if(r->block_type == RV34_MB_SKIP){
418  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, 0, sizeof(intra_types[0]));
419  return 0;
420  }
421  r->chroma_vlc = 1;
422  r->luma_vlc = 0;
423 
424  if(IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
425  if(r->is16){
426  t = get_bits(gb, 2);
427  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
428  r->luma_vlc = 2;
429  }else{
430  if(r->decode_intra_types(r, gb, intra_types) < 0)
431  return -1;
432  r->luma_vlc = 1;
433  }
434  r->chroma_vlc = 0;
435  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
436  }else{
437  for(i = 0; i < 16; i++)
438  intra_types[(i & 3) + (i>>2) * r->intra_types_stride] = 0;
439  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
440  if(r->mb_type[mb_pos] == RV34_MB_P_MIX16x16){
441  r->is16 = 1;
442  r->chroma_vlc = 1;
443  r->luma_vlc = 2;
444  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
445  }
446  }
447 
448  return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
449 }
450 
451 /** @} */ //bitstream functions
452 
453 /**
454  * @name motion vector related code (prediction, reconstruction, motion compensation)
455  * @{
456  */
457 
/** macroblock partition width in 8x8 blocks */
static const uint8_t part_sizes_w[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2 };

/** macroblock partition height in 8x8 blocks */
static const uint8_t part_sizes_h[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2 };

/** availability index for subblocks */
/* offsets into r->avail_cache for each of the four 8x8 subblocks; the cache
 * appears to be 4 entries wide (neighbours are reached with -1/-4 below) */
static const uint8_t avail_indexes[4] = { 6, 7, 10, 11 };
466 
467 /**
468  * motion vector prediction
469  *
470  * Motion prediction performed for the block by using median prediction of
471  * motion vectors from the left, top and right top blocks but in corner cases
472  * some other vectors may be used instead.
473  */
474 static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
475 {
476  MpegEncContext *s = &r->s;
477  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
478  int A[2] = {0}, B[2], C[2];
479  int i, j;
480  int mx, my;
481  int* avail = r->avail_cache + avail_indexes[subblock_no];
482  int c_off = part_sizes_w[block_type];
483 
484  mv_pos += (subblock_no & 1) + (subblock_no >> 1)*s->b8_stride;
485  if(subblock_no == 3)
486  c_off = -1;
487 
488  if(avail[-1]){
489  A[0] = s->current_picture_ptr->motion_val[0][mv_pos-1][0];
490  A[1] = s->current_picture_ptr->motion_val[0][mv_pos-1][1];
491  }
492  if(avail[-4]){
493  B[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][0];
494  B[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride][1];
495  }else{
496  B[0] = A[0];
497  B[1] = A[1];
498  }
499  if(!avail[c_off-4]){
500  if(avail[-4] && (avail[-1] || r->rv30)){
501  C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][0];
502  C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride-1][1];
503  }else{
504  C[0] = A[0];
505  C[1] = A[1];
506  }
507  }else{
508  C[0] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][0];
509  C[1] = s->current_picture_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][1];
510  }
511  mx = mid_pred(A[0], B[0], C[0]);
512  my = mid_pred(A[1], B[1], C[1]);
513  mx += r->dmv[dmv_no][0];
514  my += r->dmv[dmv_no][1];
515  for(j = 0; j < part_sizes_h[block_type]; j++){
516  for(i = 0; i < part_sizes_w[block_type]; i++){
517  s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx;
518  s->current_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][1] = my;
519  }
520  }
521 }
522 
/** difference of two PTS values, wrapped into the 13-bit range */
#define GET_PTS_DIFF(a, b) (((a) - (b) + 8192) & 0x1FFF)
524 
525 /**
526  * Calculate motion vector component that should be added for direct blocks.
527  */
528 static int calc_add_mv(RV34DecContext *r, int dir, int val)
529 {
530  int mul = dir ? -r->mv_weight2 : r->mv_weight1;
531 
532  return (int)(val * (SUINT)mul + 0x2000) >> 14;
533 }
534 
/**
 * Predict motion vector for B-frame macroblock.
 *
 * Median of the three candidates when all are available; otherwise the sum
 * of the available ones, halved when exactly two are present (unavailable
 * candidates are expected to be zero).
 */
static inline void rv34_pred_b_vector(int A[2], int B[2], int C[2],
                                      int A_avail, int B_avail, int C_avail,
                                      int *mx, int *my)
{
    const int navail = A_avail + B_avail + C_avail;

    if (navail == 3) {
        *mx = mid_pred(A[0], B[0], C[0]);
        *my = mid_pred(A[1], B[1], C[1]);
    } else {
        *mx = A[0] + B[0] + C[0];
        *my = A[1] + B[1] + C[1];
        if (navail == 2) {
            *mx /= 2;
            *my /= 2;
        }
    }
}
554 
555 /**
556  * motion vector prediction for B-frames
557  */
558 static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
559 {
560  MpegEncContext *s = &r->s;
561  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
562  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
563  int A[2] = { 0 }, B[2] = { 0 }, C[2] = { 0 };
564  int has_A = 0, has_B = 0, has_C = 0;
565  int mx, my;
566  int i, j;
567  Picture *cur_pic = s->current_picture_ptr;
568  const int mask = dir ? MB_TYPE_L1 : MB_TYPE_L0;
569  int type = cur_pic->mb_type[mb_pos];
570 
571  if((r->avail_cache[6-1] & type) & mask){
572  A[0] = cur_pic->motion_val[dir][mv_pos - 1][0];
573  A[1] = cur_pic->motion_val[dir][mv_pos - 1][1];
574  has_A = 1;
575  }
576  if((r->avail_cache[6-4] & type) & mask){
577  B[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][0];
578  B[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][1];
579  has_B = 1;
580  }
581  if(r->avail_cache[6-4] && (r->avail_cache[6-2] & type) & mask){
582  C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][0];
583  C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][1];
584  has_C = 1;
585  }else if((s->mb_x+1) == s->mb_width && (r->avail_cache[6-5] & type) & mask){
586  C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][0];
587  C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][1];
588  has_C = 1;
589  }
590 
591  rv34_pred_b_vector(A, B, C, has_A, has_B, has_C, &mx, &my);
592 
593  mx += r->dmv[dir][0];
594  my += r->dmv[dir][1];
595 
596  for(j = 0; j < 2; j++){
597  for(i = 0; i < 2; i++){
598  cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx;
599  cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my;
600  }
601  }
602  if(block_type == RV34_MB_B_BACKWARD || block_type == RV34_MB_B_FORWARD){
603  ZERO8x2(cur_pic->motion_val[!dir][mv_pos], s->b8_stride);
604  }
605 }
606 
607 /**
608  * motion vector prediction - RV3 version
609  */
610 static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
611 {
612  MpegEncContext *s = &r->s;
613  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
614  int A[2] = {0}, B[2], C[2];
615  int i, j, k;
616  int mx, my;
617  int* avail = r->avail_cache + avail_indexes[0];
618 
619  if(avail[-1]){
620  A[0] = s->current_picture_ptr->motion_val[0][mv_pos - 1][0];
621  A[1] = s->current_picture_ptr->motion_val[0][mv_pos - 1][1];
622  }
623  if(avail[-4]){
624  B[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][0];
625  B[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride][1];
626  }else{
627  B[0] = A[0];
628  B[1] = A[1];
629  }
630  if(!avail[-4 + 2]){
631  if(avail[-4] && (avail[-1])){
632  C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][0];
633  C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride - 1][1];
634  }else{
635  C[0] = A[0];
636  C[1] = A[1];
637  }
638  }else{
639  C[0] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][0];
640  C[1] = s->current_picture_ptr->motion_val[0][mv_pos - s->b8_stride + 2][1];
641  }
642  mx = mid_pred(A[0], B[0], C[0]);
643  my = mid_pred(A[1], B[1], C[1]);
644  mx += r->dmv[0][0];
645  my += r->dmv[0][1];
646  for(j = 0; j < 2; j++){
647  for(i = 0; i < 2; i++){
648  for(k = 0; k < 2; k++){
649  s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
650  s->current_picture_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
651  }
652  }
653  }
654 }
655 
/* value passed as uvmx/uvmy to the chroma MC functions for each third-pel
 * phase (0..2) of the chroma motion vector */
static const int chroma_coeffs[3] = { 0, 3, 5 };
657 
/**
 * generic motion compensation function
 *
 * @param r decoder context
 * @param block_type type of the current block
 * @param xoff horizontal offset from the start of the current block
 * @param yoff vertical offset from the start of the current block
 * @param mv_off offset to the motion vector information
 * @param width width of the current partition in 8x8 blocks
 * @param height height of the current partition in 8x8 blocks
 * @param dir motion compensation direction (i.e. from the last or the next reference frame)
 * @param thirdpel motion vectors are specified in 1/3 of pixel
 * @param qpel_mc a set of functions used to perform luma motion compensation
 * @param chroma_mc a set of functions used to perform chroma motion compensation
 */
static inline void rv34_mc(RV34DecContext *r, const int block_type,
                           const int xoff, const int yoff, int mv_off,
                           const int width, const int height, int dir,
                           const int thirdpel, int weighted,
                           qpel_mc_func (*qpel_mc)[16],
/* NOTE(review): the final parameter line (original line 678, presumably the
 * chroma_mc function table, h264_chroma_mc_func (*chroma_mc)[3]) is missing
 * from this extraction. */
{
    MpegEncContext *s = &r->s;
    uint8_t *Y, *U, *V, *srcY, *srcU, *srcV;
    int dxy, mx, my, umx, umy, lx, ly, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
    int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride + mv_off;
    int is16x16 = 1;
    int emu = 0;

    if(thirdpel){
        int chroma_mx, chroma_my;
        /* split the vector into integer and 1/3-pel parts; the (3 << 24)
         * bias keeps the dividend positive so truncating division and
         * modulo behave like floor/positive remainder for negative MVs */
        mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
        my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
        lx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
        ly = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
        chroma_mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
        chroma_my = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
        umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24);
        umy = (chroma_my + (3 << 24)) / 3 - (1 << 24);
        uvmx = chroma_coeffs[(chroma_mx + (3 << 24)) % 3];
        uvmy = chroma_coeffs[(chroma_my + (3 << 24)) % 3];
    }else{
        int cx, cy;
        /* quarter-pel vectors: integer part and subpel phase */
        mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2;
        my = s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2;
        lx = s->current_picture_ptr->motion_val[dir][mv_pos][0] & 3;
        ly = s->current_picture_ptr->motion_val[dir][mv_pos][1] & 3;
        cx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 2;
        cy = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 2;
        umx = cx >> 2;
        umy = cy >> 2;
        uvmx = (cx & 3) << 1;
        uvmy = (cy & 3) << 1;
        //due to some flaw RV40 uses the same MC compensation routine for H2V2 and H3V3
        if(uvmx == 6 && uvmy == 6)
            uvmx = uvmy = 4;
    }

    if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
        /* wait for the referenced mb row to be finished */
        int mb_row = s->mb_y + ((yoff + my + 5 + 8 * height) >> 4);
        const ThreadFrame *f = dir ? &s->next_picture_ptr->tf : &s->last_picture_ptr->tf;
        ff_thread_await_progress(f, mb_row, 0);
    }

    dxy = ly*4 + lx;
    srcY = dir ? s->next_picture_ptr->f->data[0] : s->last_picture_ptr->f->data[0];
    srcU = dir ? s->next_picture_ptr->f->data[1] : s->last_picture_ptr->f->data[1];
    srcV = dir ? s->next_picture_ptr->f->data[2] : s->last_picture_ptr->f->data[2];
    src_x = s->mb_x * 16 + xoff + mx;
    src_y = s->mb_y * 16 + yoff + my;
    uvsrc_x = s->mb_x * 8 + (xoff >> 1) + umx;
    uvsrc_y = s->mb_y * 8 + (yoff >> 1) + umy;
    srcY += src_y * s->linesize + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
    /* reference area (incl. interpolation margin) outside the picture:
     * extend the edges into a temporary buffer first */
    if(s->h_edge_pos - (width << 3) < 6 || s->v_edge_pos - (height << 3) < 6 ||
       (unsigned)(src_x - !!lx*2) > s->h_edge_pos - !!lx*2 - (width <<3) - 4 ||
       (unsigned)(src_y - !!ly*2) > s->v_edge_pos - !!ly*2 - (height<<3) - 4) {
        srcY -= 2 + 2*s->linesize;
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcY,
                                 s->linesize, s->linesize,
                                 (width << 3) + 6, (height << 3) + 6,
                                 src_x - 2, src_y - 2,
                                 s->h_edge_pos, s->v_edge_pos);
        srcY = s->sc.edge_emu_buffer + 2 + 2*s->linesize;
        emu = 1;
    }
    /* weighted B prediction goes to temporary buffers, blended later */
    if(!weighted){
        Y = s->dest[0] + xoff      + yoff     *s->linesize;
        U = s->dest[1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
        V = s->dest[2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
    }else{
        Y = r->tmp_b_block_y [dir]     +  xoff     +  yoff    *s->linesize;
        U = r->tmp_b_block_uv[dir*2]   + (xoff>>1) + (yoff>>1)*s->uvlinesize;
        V = r->tmp_b_block_uv[dir*2+1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
    }

    /* 16x8/8x16 partitions: do the first half, then advance to the second */
    if(block_type == RV34_MB_P_16x8){
        qpel_mc[1][dxy](Y, srcY, s->linesize);
        Y    += 8;
        srcY += 8;
    }else if(block_type == RV34_MB_P_8x16){
        qpel_mc[1][dxy](Y, srcY, s->linesize);
        Y    += 8 * s->linesize;
        srcY += 8 * s->linesize;
    }
    is16x16 = (block_type != RV34_MB_P_8x8) && (block_type != RV34_MB_P_16x8) && (block_type != RV34_MB_P_8x16);
    qpel_mc[!is16x16][dxy](Y, srcY, s->linesize);
    if (emu) {
        uint8_t *uvbuf = s->sc.edge_emu_buffer;

        s->vdsp.emulated_edge_mc(uvbuf, srcU,
                                 s->uvlinesize, s->uvlinesize,
                                 (width << 2) + 1, (height << 2) + 1,
                                 uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        srcU = uvbuf;
        uvbuf += 9*s->uvlinesize;

        s->vdsp.emulated_edge_mc(uvbuf, srcV,
                                 s->uvlinesize, s->uvlinesize,
                                 (width << 2) + 1, (height << 2) + 1,
                                 uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        srcV = uvbuf;
    }
    chroma_mc[2-width]   (U, srcU, s->uvlinesize, height*4, uvmx, uvmy);
    chroma_mc[2-width]   (V, srcV, s->uvlinesize, height*4, uvmx, uvmy);
}
788 
789 static void rv34_mc_1mv(RV34DecContext *r, const int block_type,
790  const int xoff, const int yoff, int mv_off,
791  const int width, const int height, int dir)
792 {
793  rv34_mc(r, block_type, xoff, yoff, mv_off, width, height, dir, r->rv30, 0,
794  r->rdsp.put_pixels_tab,
795  r->rdsp.put_chroma_pixels_tab);
796 }
797 
/* Blend the two temporary B-frame predictions (Y, U, V) into the
 * destination using the explicit weights. */
/* NOTE(review): the function signature (original line 798, presumably
 * static void rv4_weight(RV34DecContext *r)) is missing from this
 * extraction. */
{
    r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][0](r->s.dest[0],
                                                        r->tmp_b_block_y[0],
                                                        r->tmp_b_block_y[1],
                                                        r->weight1,
                                                        r->weight2,
                                                        r->s.linesize);
    r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[1],
                                                        r->tmp_b_block_uv[0],
                                                        r->tmp_b_block_uv[2],
                                                        r->weight1,
                                                        r->weight2,
                                                        r->s.uvlinesize);
    r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[2],
                                                        r->tmp_b_block_uv[1],
                                                        r->tmp_b_block_uv[3],
                                                        r->weight1,
                                                        r->weight2,
                                                        r->s.uvlinesize);
}
819 
820 static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
821 {
822  int weighted = !r->rv30 && block_type != RV34_MB_B_BIDIR && r->weight1 != 8192;
823 
824  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 0, r->rv30, weighted,
825  r->rdsp.put_pixels_tab,
826  r->rdsp.put_chroma_pixels_tab);
827  if(!weighted){
828  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 0,
829  r->rdsp.avg_pixels_tab,
830  r->rdsp.avg_chroma_pixels_tab);
831  }else{
832  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 1,
833  r->rdsp.put_pixels_tab,
834  r->rdsp.put_chroma_pixels_tab);
835  rv4_weight(r);
836  }
837 }
838 
/* Bidirectional MC done per 8x8 block (used when the colocated MB was
 * subdivided); with explicit weighting both directions go to temp buffers
 * and are blended at the end. */
/* NOTE(review): the function signature (original line 839, presumably
 * static void rv34_mc_2mv_skip(RV34DecContext *r)) is missing from this
 * extraction. */
{
    int i, j;
    int weighted = !r->rv30 && r->weight1 != 8192;

    for(j = 0; j < 2; j++)
        for(i = 0; i < 2; i++){
            rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 0, r->rv30,
                    weighted,
                    r->rdsp.put_pixels_tab,
                    r->rdsp.put_chroma_pixels_tab);
            rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 1, r->rv30,
                    weighted,
                    weighted ? r->rdsp.put_pixels_tab : r->rdsp.avg_pixels_tab,
                    weighted ? r->rdsp.put_chroma_pixels_tab : r->rdsp.avg_chroma_pixels_tab);
        }
    if(weighted)
        rv4_weight(r);
}
858 
/** number of motion vectors coded in the bitstream for each macroblock type */
static const int num_mvs[RV34_MB_TYPES] = { 0, 0, 1, 4, 1, 1, 0, 0, 2, 2, 2, 1 };
861 
/**
 * Decode motion vector differences
 * and perform motion vector reconstruction and motion compensation.
 *
 * @return 0 on success, AVERROR_INVALIDDATA on an invalid MV code
 */
static int rv34_decode_mv(RV34DecContext *r, int block_type)
{
    MpegEncContext *s = &r->s;
    GetBitContext *gb = &s->gb;
    int i, j, k, l;
    int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
    int next_bt;

    /* read the signed MV differences for this MB type */
    memset(r->dmv, 0, sizeof(r->dmv));
    for(i = 0; i < num_mvs[block_type]; i++){
        r->dmv[i][0] = get_interleaved_se_golomb(gb);
        r->dmv[i][1] = get_interleaved_se_golomb(gb);
        if (r->dmv[i][0] == INVALID_VLC ||
            r->dmv[i][1] == INVALID_VLC) {
            r->dmv[i][0] = r->dmv[i][1] = 0;
            return AVERROR_INVALIDDATA;
        }
    }
    switch(block_type){
    case RV34_MB_TYPE_INTRA:
    /* NOTE(review): original line 886 (presumably the fallthrough label
     * case RV34_MB_TYPE_INTRA16x16:) is missing from this extraction. */
        ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
        return 0;
    case RV34_MB_SKIP:
        if(s->pict_type == AV_PICTURE_TYPE_P){
            ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
            rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
            break;
        }
        /* fall through: in B-frames a skipped MB is handled as direct */
    case RV34_MB_B_DIRECT:
        //surprisingly, it uses motion scheme from next reference frame
        /* wait for the current mb row to be finished */
        if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
            ff_thread_await_progress(&s->next_picture_ptr->tf, FFMAX(0, s->mb_y-1), 0);

        next_bt = s->next_picture_ptr->mb_type[s->mb_x + s->mb_y * s->mb_stride];
        if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){
            ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
            ZERO8x2(s->current_picture_ptr->motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
        }else
            /* scale the colocated vectors of the next reference frame */
            for(j = 0; j < 2; j++)
                for(i = 0; i < 2; i++)
                    for(k = 0; k < 2; k++)
                        for(l = 0; l < 2; l++)
                            s->current_picture_ptr->motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_picture_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][k]);
        if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC
            rv34_mc_2mv(r, block_type);
        else
        /* NOTE(review): original line 914 (presumably rv34_mc_2mv_skip(r);,
         * the per-8x8 MC path) is missing from this extraction. */
        ZERO8x2(s->current_picture_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
        break;
    case RV34_MB_P_16x16:
    case RV34_MB_P_MIX16x16:
        rv34_pred_mv(r, block_type, 0, 0);
        rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
        break;
    case RV34_MB_B_FORWARD:
    case RV34_MB_B_BACKWARD:
        r->dmv[1][0] = r->dmv[0][0];
        r->dmv[1][1] = r->dmv[0][1];
        if(r->rv30)
            rv34_pred_mv_rv3(r, block_type, block_type == RV34_MB_B_BACKWARD);
        else
            rv34_pred_mv_b  (r, block_type, block_type == RV34_MB_B_BACKWARD);
        rv34_mc_1mv     (r, block_type, 0, 0, 0, 2, 2, block_type == RV34_MB_B_BACKWARD);
        break;
    case RV34_MB_P_16x8:
    case RV34_MB_P_8x16:
        rv34_pred_mv(r, block_type, 0, 0);
        rv34_pred_mv(r, block_type, 1 + (block_type == RV34_MB_P_16x8), 1);
        if(block_type == RV34_MB_P_16x8){
            rv34_mc_1mv(r, block_type, 0, 0, 0,            2, 1, 0);
            rv34_mc_1mv(r, block_type, 0, 8, s->b8_stride, 2, 1, 0);
        }
        if(block_type == RV34_MB_P_8x16){
            rv34_mc_1mv(r, block_type, 0, 0, 0, 1, 2, 0);
            rv34_mc_1mv(r, block_type, 8, 0, 1, 1, 2, 0);
        }
        break;
    case RV34_MB_B_BIDIR:
        rv34_pred_mv_b  (r, block_type, 0);
        rv34_pred_mv_b  (r, block_type, 1);
        rv34_mc_2mv     (r, block_type);
        break;
    case RV34_MB_P_8x8:
        for(i=0;i< 4;i++){
            rv34_pred_mv(r, block_type, i, i);
            rv34_mc_1mv (r, block_type, (i&1)<<3, (i&2)<<2, (i&1)+(i>>1)*s->b8_stride, 1, 1, 0);
        }
        break;
    }

    return 0;
}
960 /** @} */ // mv group
961 
962 /**
963  * @name Macroblock reconstruction functions
964  * @{
965  */
/** mapping of RV30/40 intra prediction types to standard H.264 types */
static const int ittrans[9] = {
/* NOTE(review): the 9 initializer entries (original lines 968-969) are
 * missing from this extraction -- restore them from the original rv34.c. */
};
971 
/** mapping of RV30/40 intra 16x16 prediction types to standard H.264 types */
static const int ittrans16[4] = {
/* NOTE(review): the 4 initializer entries (original line 974) are missing
 * from this extraction -- restore them from the original rv34.c. */
};
976 
/**
 * Perform 4x4 intra prediction.
 *
 * The requested prediction type is remapped when some neighbours (up, left,
 * down, right) are unavailable, then dispatched to the H.264 predictor.
 */
static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
{
    uint8_t *prev = dst - stride + 4;
    uint32_t topleft;

    if(!up && !left)
        itype = DC_128_PRED;
    else if(!up){
        if(itype == VERT_PRED) itype = HOR_PRED;
        if(itype == DC_PRED)   itype = LEFT_DC_PRED;
    }else if(!left){
        if(itype == HOR_PRED)  itype = VERT_PRED;
        if(itype == DC_PRED)   itype = TOP_DC_PRED;
        /* NOTE(review): one more remap (original line 993, presumably for
         * DIAG_DOWN_LEFT_PRED) is missing from this extraction. */
    }
    if(!down){
        /* NOTE(review): original line 996 (presumably the
         * DIAG_DOWN_LEFT_PRED "no down" remap) is missing from this
         * extraction. */
        if(itype == HOR_UP_PRED) itype = HOR_UP_PRED_RV40_NODOWN;
        if(itype == VERT_LEFT_PRED) itype = VERT_LEFT_PRED_RV40_NODOWN;
    }
    if(!right && up){
        /* no top-right neighbour: replicate the rightmost top pixel */
        topleft = dst[-stride + 3] * 0x01010101u;
        prev = (uint8_t*)&topleft;
    }
    r->h.pred4x4[itype](dst, prev, stride);
}
1006 
1007 static inline int adjust_pred16(int itype, int up, int left)
1008 {
1009  if(!up && !left)
1010  itype = DC_128_PRED8x8;
1011  else if(!up){
1012  if(itype == PLANE_PRED8x8)itype = HOR_PRED8x8;
1013  if(itype == VERT_PRED8x8) itype = HOR_PRED8x8;
1014  if(itype == DC_PRED8x8) itype = LEFT_DC_PRED8x8;
1015  }else if(!left){
1016  if(itype == PLANE_PRED8x8)itype = VERT_PRED8x8;
1017  if(itype == HOR_PRED8x8) itype = VERT_PRED8x8;
1018  if(itype == DC_PRED8x8) itype = TOP_DC_PRED8x8;
1019  }
1020  return itype;
1021 }
1022 
 /*
  * rv34_process_block() — decode one 4x4 residual block and add it to the
  * prediction at pdst: full IDCT when AC coefficients are present, the cheap
  * DC-only path otherwise (the DC slot is cleared afterwards for reuse).
  * NOTE(review): the opening signature line (source line 1023,
  * `static void rv34_process_block(RV34DecContext *r,` judging by the call
  * sites below) was lost in the documentation extraction — verify upstream.
  */
1024  uint8_t *pdst, int stride,
1025  int fc, int sc, int q_dc, int q_ac)
1026 {
1027  MpegEncContext *s = &r->s;
1028  int16_t *ptr = s->block[0];
1029  int has_ac = rv34_decode_block(ptr, &s->gb, r->cur_vlcs,
1030  fc, sc, q_dc, q_ac, q_ac);
1031  if(has_ac){
1032  r->rdsp.rv34_idct_add(pdst, stride, ptr);
1033  }else{
1034  r->rdsp.rv34_idct_dc_add(pdst, stride, ptr[0]);
1035  ptr[0] = 0;
1036  }
1037 }
1038 
/**
 * Reconstruct an intra 16x16 macroblock.
 * The 16 DC coefficients are decoded as a separate 4x4 block (first VLC set,
 * index 3) and inverse-transformed, then each 4x4 luma sub-block decodes its
 * AC coefficients (if its cbp bit is set) with the DC re-inserted before the
 * IDCT.  Chroma uses one 8x8 prediction per plane followed by coded blocks.
 */
1039 static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
1040 {
1041  LOCAL_ALIGNED_16(int16_t, block16, [16]);
1042  MpegEncContext *s = &r->s;
1043  GetBitContext *gb = &s->gb;
1044  int q_dc = rv34_qscale_tab[ r->luma_dc_quant_i[s->qscale] ],
1045  q_ac = rv34_qscale_tab[s->qscale];
1046  uint8_t *dst = s->dest[0];
1047  int16_t *ptr = s->block[0];
1048  int i, j, itype, has_ac;
1049 
1050  memset(block16, 0, 16 * sizeof(*block16));
1051 
 // Decode and inverse-transform the 4x4 block of luma DC coefficients.
1052  has_ac = rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac);
1053  if(has_ac)
1054  r->rdsp.rv34_inv_transform(block16);
1055  else
1056  r->rdsp.rv34_inv_transform_dc(block16);
1057 
 // One 16x16 luma prediction, mode adjusted for top/left availability.
1058  itype = ittrans16[intra_types[0]];
1059  itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
1060  r->h.pred16x16[itype](dst, s->linesize);
1061 
1062  for(j = 0; j < 4; j++){
1063  for(i = 0; i < 4; i++, cbp >>= 1){
1064  int dc = block16[i + j*4];
1065 
1066  if(cbp & 1){
1067  has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
1068  }else
1069  has_ac = 0;
1070 
 // Re-insert the separately decoded DC before adding the residual.
1071  if(has_ac){
1072  ptr[0] = dc;
1073  r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
1074  }else
1075  r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
1076  }
1077 
1078  dst += 4*s->linesize;
1079  }
1080 
 // Chroma prediction: PLANE is demoted to DC for the 8x8 predictors.
1081  itype = ittrans16[intra_types[0]];
1082  if(itype == PLANE_PRED8x8) itype = DC_PRED8x8;
1083  itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
1084 
1085  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1086  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1087 
1088  for(j = 1; j < 3; j++){
1089  dst = s->dest[j];
1090  r->h.pred8x8[itype](dst, s->uvlinesize);
1091  for(i = 0; i < 4; i++, cbp >>= 1){
1092  uint8_t *pdst;
1093  if(!(cbp & 1)) continue;
1094  pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;
1095 
1096  rv34_process_block(r, pdst, s->uvlinesize,
1097  r->chroma_vlc, 1, q_dc, q_ac);
1098  }
1099  }
1100 }
1101 
/**
 * Reconstruct an intra macroblock with per-4x4 prediction modes.
 * A 6x8 `avail` map tracks which 4x4 neighbours exist; each sub-block is
 * predicted with rv34_pred_4x4_block() and, when its cbp bit is set, gets a
 * decoded residual added.  Luma is done in raster order, then both chroma
 * planes reuse r->avail_cache as a 2x2 availability scratch area.
 */
1102 static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
1103 {
1104  MpegEncContext *s = &r->s;
1105  uint8_t *dst = s->dest[0];
1106  int avail[6*8] = {0};
1107  int i, j, k;
1108  int idx, q_ac, q_dc;
1109 
1110  // Set neighbour information.
1111  if(r->avail_cache[1])
1112  avail[0] = 1;
1113  if(r->avail_cache[2])
1114  avail[1] = avail[2] = 1;
1115  if(r->avail_cache[3])
1116  avail[3] = avail[4] = 1;
1117  if(r->avail_cache[4])
1118  avail[5] = 1;
1119  if(r->avail_cache[5])
1120  avail[8] = avail[16] = 1;
1121  if(r->avail_cache[9])
1122  avail[24] = avail[32] = 1;
1123 
 // Luma: 16 4x4 blocks; offsets idx-8/-1/+7/-7 are the up/left/down-left/
 // up-right neighbours in the 8-wide availability map.
1124  q_ac = rv34_qscale_tab[s->qscale];
1125  for(j = 0; j < 4; j++){
1126  idx = 9 + j*8;
1127  for(i = 0; i < 4; i++, cbp >>= 1, dst += 4, idx++){
1128  rv34_pred_4x4_block(r, dst, s->linesize, ittrans[intra_types[i]], avail[idx-8], avail[idx-1], avail[idx+7], avail[idx-7]);
1129  avail[idx] = 1;
1130  if(!(cbp & 1)) continue;
1131 
1132  rv34_process_block(r, dst, s->linesize,
1133  r->luma_vlc, 0, q_ac, q_ac);
1134  }
 // Step to the next row of 4x4 blocks (undo the 4*4 horizontal advance).
1135  dst += s->linesize * 4 - 4*4;
1136  intra_types += r->intra_types_stride;
1137  }
1138 
1139  intra_types -= r->intra_types_stride * 4;
1140 
1141  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1142  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1143 
1144  for(k = 0; k < 2; k++){
1145  dst = s->dest[1+k];
1146  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 0, 4);
1147 
1148  for(j = 0; j < 2; j++){
1149  int* acache = r->avail_cache + 6 + j*4;
1150  for(i = 0; i < 2; i++, cbp >>= 1, acache++){
 // Chroma modes come from every other luma intra type entry.
1151  int itype = ittrans[intra_types[i*2+j*2*r->intra_types_stride]];
1152  rv34_pred_4x4_block(r, dst+4*i, s->uvlinesize, itype, acache[-4], acache[-1], !i && !j, acache[-3]);
1153  acache[0] = 1;
1154 
1155  if(!(cbp&1)) continue;
1156 
1157  rv34_process_block(r, dst + 4*i, s->uvlinesize,
1158  r->chroma_vlc, 1, q_dc, q_ac);
1159  }
1160 
1161  dst += 4*s->uvlinesize;
1162  }
1163  }
1164 }
1165 
/**
 * Test whether a motion vector differs from the neighbouring one located
 * `step` entries earlier by more than 3 (quarter-pel units) in either
 * component.
 *
 * @return 1 if the difference exceeds 3 in x or y, 0 otherwise
 */
static int is_mv_diff_gt_3(int16_t (*motion_val)[2], int step)
{
    int comp;

    for (comp = 0; comp < 2; comp++) {
        int diff = motion_val[0][comp] - motion_val[-step][comp];
        if (diff < -3 || diff > 3)
            return 1;
    }
    return 0;
}
1177 
 /*
  * rv34_set_deblock_coef() — build the per-macroblock deblocking mask from
  * motion vector discontinuities: bit set when a 4x4 edge has neighbouring
  * MVs differing by more than 3 (vertical edges -> vmvmask, horizontal ->
  * hmvmask).  Returns the combined mask; for RV30 it also marks edge
  * subblocks in the left/top neighbours' masks.
  * NOTE(review): the signature line (source line 1178,
  * `static int rv34_set_deblock_coef(RV34DecContext *r)` per the doc index)
  * was lost in the documentation extraction.
  */
1180  MpegEncContext *s = &r->s;
1181  int hmvmask = 0, vmvmask = 0, i, j;
1182  int midx = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
1183  int16_t (*motion_val)[2] = &s->current_picture_ptr->motion_val[0][midx];
1184  for(j = 0; j < 16; j += 8){
1185  for(i = 0; i < 2; i++){
1186  if(is_mv_diff_gt_3(motion_val + i, 1))
1187  vmvmask |= 0x11 << (j + i*2);
1188  if((j || s->mb_y) && is_mv_diff_gt_3(motion_val + i, s->b8_stride))
1189  hmvmask |= 0x03 << (j + i*2);
1190  }
1191  motion_val += s->b8_stride;
1192  }
 // Picture/slice borders cannot be filtered against missing neighbours.
1193  if(s->first_slice_line)
1194  hmvmask &= ~0x000F;
1195  if(!s->mb_x)
1196  vmvmask &= ~0x1111;
1197  if(r->rv30){ //RV30 marks both subblocks on the edge for filtering
1198  vmvmask |= (vmvmask & 0x4444) >> 1;
1199  hmvmask |= (hmvmask & 0x0F00) >> 4;
1200  if(s->mb_x)
1201  r->deblock_coefs[s->mb_x - 1 + s->mb_y*s->mb_stride] |= (vmvmask & 0x1111) << 3;
1202  if(!s->first_slice_line)
1203  r->deblock_coefs[s->mb_x + (s->mb_y - 1)*s->mb_stride] |= (hmvmask & 0xF) << 12;
1204  }
1205  return hmvmask | vmvmask;
1206 }
1207 
/**
 * Decode and reconstruct one macroblock of an inter (P/B) slice.
 * Fills the availability cache from already-decoded neighbours, decodes the
 * MB header (which also performs motion compensation), records cbp/deblock
 * state, then adds the residual: intra MBs are routed to the intra output
 * functions, RV34_MB_P_MIX16x16 uses the separate luma DC block, and plain
 * inter MBs decode per-4x4 coded blocks.  Returns 0 on success, -1 on error.
 */
1208 static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
1209 {
1210  MpegEncContext *s = &r->s;
1211  GetBitContext *gb = &s->gb;
1212  uint8_t *dst = s->dest[0];
1213  int16_t *ptr = s->block[0];
1214  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1215  int cbp, cbp2;
1216  int q_dc, q_ac, has_ac;
1217  int i, j;
1218  int dist;
1219 
1220  // Calculate which neighbours are available. Maybe it's worth optimizing too.
 // `dist` is the distance from the slice resync point; neighbours from a
 // different slice must not be used for prediction.
1221  memset(r->avail_cache, 0, sizeof(r->avail_cache));
1222  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
1223  dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
1224  if(s->mb_x && dist)
1225  r->avail_cache[5] =
1226  r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
1227  if(dist >= s->mb_width)
1228  r->avail_cache[2] =
1229  r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
1230  if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
1231  r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
1232  if(s->mb_x && dist > s->mb_width)
1233  r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
1234 
1235  s->qscale = r->si.quant;
1236  cbp = cbp2 = rv34_decode_inter_mb_header(r, intra_types);
1237  r->cbp_luma [mb_pos] = cbp;
1238  r->cbp_chroma[mb_pos] = cbp >> 16;
1239  r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
1240  s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
1241 
1242  if(cbp == -1)
1243  return -1;
1244 
1245  if (IS_INTRA(s->current_picture_ptr->mb_type[mb_pos])){
1246  if(r->is16) rv34_output_i16x16(r, intra_types, cbp);
1247  else rv34_output_intra(r, intra_types, cbp);
1248  return 0;
1249  }
1250 
1251  if(r->is16){
1252  // Only for RV34_MB_P_MIX16x16
1253  LOCAL_ALIGNED_16(int16_t, block16, [16]);
1254  memset(block16, 0, 16 * sizeof(*block16));
1255  q_dc = rv34_qscale_tab[ r->luma_dc_quant_p[s->qscale] ];
1256  q_ac = rv34_qscale_tab[s->qscale];
1257  if (rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac))
1258  r->rdsp.rv34_inv_transform(block16);
1259  else
1260  r->rdsp.rv34_inv_transform_dc(block16);
1261 
 // NOTE(review): q_ac was already assigned this value above — redundant
 // but harmless reassignment (present upstream as well).
1262  q_ac = rv34_qscale_tab[s->qscale];
1263 
1264  for(j = 0; j < 4; j++){
1265  for(i = 0; i < 4; i++, cbp >>= 1){
1266  int dc = block16[i + j*4];
1267 
1268  if(cbp & 1){
1269  has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
1270  }else
1271  has_ac = 0;
1272 
1273  if(has_ac){
1274  ptr[0] = dc;
1275  r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
1276  }else
1277  r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
1278  }
1279 
1280  dst += 4*s->linesize;
1281  }
1282 
 // MIX16x16 switches to the inter VLC tables for the remaining blocks.
1283  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
1284  }else{
1285  q_ac = rv34_qscale_tab[s->qscale];
1286 
1287  for(j = 0; j < 4; j++){
1288  for(i = 0; i < 4; i++, cbp >>= 1){
1289  if(!(cbp & 1)) continue;
1290 
1291  rv34_process_block(r, dst + 4*i, s->linesize,
1292  r->luma_vlc, 0, q_ac, q_ac);
1293  }
1294  dst += 4*s->linesize;
1295  }
1296  }
1297 
1298  q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
1299  q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];
1300 
1301  for(j = 1; j < 3; j++){
1302  dst = s->dest[j];
1303  for(i = 0; i < 4; i++, cbp >>= 1){
1304  uint8_t *pdst;
1305  if(!(cbp & 1)) continue;
1306  pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;
1307 
1308  rv34_process_block(r, pdst, s->uvlinesize,
1309  r->chroma_vlc, 1, q_dc, q_ac);
1310  }
1311  }
1312 
1313  return 0;
1314 }
1315 
/**
 * Decode and reconstruct one macroblock of an intra (I) slice.
 * Mirrors the availability computation of rv34_decode_inter_macroblock(),
 * decodes the intra MB header, stores cbp/qscale bookkeeping (intra MBs are
 * always fully deblocked: mask 0xFFFF) and dispatches to the 16x16 or
 * per-4x4 intra output path.  Returns 0 on success, -1 on error.
 */
1316 static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
1317 {
1318  MpegEncContext *s = &r->s;
1319  int cbp, dist;
1320  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1321 
1322  // Calculate which neighbours are available. Maybe it's worth optimizing too.
1323  memset(r->avail_cache, 0, sizeof(r->avail_cache));
1324  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
1325  dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
1326  if(s->mb_x && dist)
1327  r->avail_cache[5] =
1328  r->avail_cache[9] = s->current_picture_ptr->mb_type[mb_pos - 1];
1329  if(dist >= s->mb_width)
1330  r->avail_cache[2] =
1331  r->avail_cache[3] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride];
1332  if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
1333  r->avail_cache[4] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride + 1];
1334  if(s->mb_x && dist > s->mb_width)
1335  r->avail_cache[1] = s->current_picture_ptr->mb_type[mb_pos - s->mb_stride - 1];
1336 
1337  s->qscale = r->si.quant;
1338  cbp = rv34_decode_intra_mb_header(r, intra_types);
1339  r->cbp_luma [mb_pos] = cbp;
1340  r->cbp_chroma[mb_pos] = cbp >> 16;
 // Intra macroblocks get all edges filtered.
1341  r->deblock_coefs[mb_pos] = 0xFFFF;
1342  s->current_picture_ptr->qscale_table[mb_pos] = s->qscale;
1343 
1344  if(cbp == -1)
1345  return -1;
1346 
1347  if(r->is16){
1348  rv34_output_i16x16(r, intra_types, cbp);
1349  return 0;
1350  }
1351 
1352  rv34_output_intra(r, intra_types, cbp);
1353  return 0;
1354 }
1355 
 /*
  * check_slice_end() — return 1 when the current slice is finished (past
  * the picture, no macroblocks left, or too few meaningful bits remain),
  * 0 while decoding should continue; a pending skip run keeps going even
  * without bits.
  * NOTE(review): the signature line (source line 1356,
  * `static int check_slice_end(RV34DecContext *r, MpegEncContext *s)` per
  * the doc index) was lost in the documentation extraction.
  */
1358  int bits;
1359  if(s->mb_y >= s->mb_height)
1360  return 1;
1361  if(!s->mb_num_left)
1362  return 1;
1363  if(r->s.mb_skip_run > 1)
1364  return 0;
1365  bits = get_bits_left(&s->gb);
 // Fewer than 8 bits remaining and all zero -> only padding is left.
1366  if(bits <= 0 || (bits < 8 && !show_bits(&s->gb, bits)))
1367  return 1;
1368  return 0;
1369 }
1370 
1371 
 /*
  * Free all per-decoder buffers allocated by rv34_decoder_alloc().
  * NOTE(review): the signature line (presumably
  * `static void rv34_decoder_free(RV34DecContext *r)`) was lost in the
  * documentation extraction — confirm against upstream rv34.c.
  */
1374  av_freep(&r->intra_types_hist);
 // intra_types points into intra_types_hist, so it is just cleared.
1375  r->intra_types = NULL;
1376  av_freep(&r->tmp_b_block_base);
1377  av_freep(&r->mb_type);
1378  av_freep(&r->cbp_luma);
1379  av_freep(&r->cbp_chroma);
1380  av_freep(&r->deblock_coefs);
1381 }
1382 
1383 
 /*
  * rv34_decoder_alloc() — allocate the per-picture bookkeeping arrays
  * (cbp, deblock masks, intra type history, mb types) sized from the
  * current mb_stride/mb_height.  On any allocation failure the context is
  * flagged for reinit and AVERROR(ENOMEM) is returned.
  * NOTE(review): the signature line (source line 1384, per the doc index
  * `static int rv34_decoder_alloc(RV34DecContext *r)`) was lost in the
  * documentation extraction.
  */
1386  r->intra_types_stride = r->s.mb_width * 4 + 4;
1387 
1388  r->cbp_chroma = av_mallocz(r->s.mb_stride * r->s.mb_height *
1389  sizeof(*r->cbp_chroma));
1390  r->cbp_luma = av_mallocz(r->s.mb_stride * r->s.mb_height *
1391  sizeof(*r->cbp_luma));
1392  r->deblock_coefs = av_mallocz(r->s.mb_stride * r->s.mb_height *
1393  sizeof(*r->deblock_coefs));
1394  r->intra_types_hist = av_malloc(r->intra_types_stride * 4 * 2 *
1395  sizeof(*r->intra_types_hist));
1396  r->mb_type = av_mallocz(r->s.mb_stride * r->s.mb_height *
1397  sizeof(*r->mb_type));
1398 
1399  if (!(r->cbp_chroma && r->cbp_luma && r->deblock_coefs &&
1400  r->intra_types_hist && r->mb_type)) {
1401  r->s.context_reinit = 1;
 // NOTE(review): source line 1402 is missing from this extraction —
 // likely a cleanup call freeing the partially allocated buffers; verify
 // against upstream rv34.c.
1403  return AVERROR(ENOMEM);
1404  }
1405 
 // intra_types is the second half of the double-buffered history array.
1406  r->intra_types = r->intra_types_hist + r->intra_types_stride * 4;
1407 
1408  return 0;
1409 }
1410 
1411 
 /*
  * rv34_decoder_realloc() — re-create the per-picture buffers after a
  * resolution change.
  * NOTE(review): the signature line (source line 1412, per the doc index
  * `static int rv34_decoder_realloc(RV34DecContext *r)`) and interior line
  * 1414 (presumably a call freeing the old buffers before reallocation)
  * were lost in the documentation extraction — verify upstream.
  */
1415  return rv34_decoder_alloc(r);
1416 }
1417 
1418 
/**
 * Decode one slice: parse and validate its header, resynchronize the MB
 * position, then decode macroblocks until check_slice_end() fires, running
 * the loop filter and reporting frame-threading progress row by row.
 *
 * @return 1 when the picture is complete (last MB row reached), 0 when more
 *         slices are expected, negative on error
 */
1419 static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int buf_size)
1420 {
1421  MpegEncContext *s = &r->s;
1422  GetBitContext *gb = &s->gb;
1423  int mb_pos, slice_type;
1424  int res;
1425 
1426  init_get_bits(&r->s.gb, buf, buf_size*8);
1427  res = r->parse_slice_header(r, gb, &r->si);
1428  if(res < 0){
1429  av_log(s->avctx, AV_LOG_ERROR, "Incorrect or unknown slice header\n");
1430  return -1;
1431  }
1432 
 // All slices of a picture must agree with the frame-level parameters.
1433  slice_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
1434  if (slice_type != s->pict_type) {
1435  av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n");
1436  return AVERROR_INVALIDDATA;
1437  }
1438  if (s->width != r->si.width || s->height != r->si.height) {
1439  av_log(s->avctx, AV_LOG_ERROR, "Size mismatch\n");
1440  return AVERROR_INVALIDDATA;
1441  }
1442 
1443  r->si.end = end;
1444  s->qscale = r->si.quant;
1445  s->mb_num_left = r->si.end - r->si.start;
1446  r->s.mb_skip_run = 0;
1447 
 // Resynchronize to the MB position the slice header declares.
1448  mb_pos = s->mb_x + s->mb_y * s->mb_width;
1449  if(r->si.start != mb_pos){
1450  av_log(s->avctx, AV_LOG_ERROR, "Slice indicates MB offset %d, got %d\n", r->si.start, mb_pos);
1451  s->mb_x = r->si.start % s->mb_width;
1452  s->mb_y = r->si.start / s->mb_width;
1453  }
1454  memset(r->intra_types_hist, -1, r->intra_types_stride * 4 * 2 * sizeof(*r->intra_types_hist));
1455  s->first_slice_line = 1;
1456  s->resync_mb_x = s->mb_x;
1457  s->resync_mb_y = s->mb_y;
1458 
 // NOTE(review): source line 1459 is missing from this extraction
 // (likely a block-index initialization call); verify against upstream.
1460  while(!check_slice_end(r, s)) {
1461  ff_update_block_index(s, 8, 0, 1);
1462 
1463  if(r->si.type)
1464  res = rv34_decode_inter_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
1465  else
1466  res = rv34_decode_intra_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
1467  if(res < 0){
1468  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_ERROR);
1469  return -1;
1470  }
1471  if (++s->mb_x == s->mb_width) {
1472  s->mb_x = 0;
1473  s->mb_y++;
 // NOTE(review): source line 1474 is missing from this extraction
 // (likely ff_init_block_index(s), cf. the doc index); verify upstream.
1475 
 // Roll the intra-type history: previous row becomes the "hist" half.
1476  memmove(r->intra_types_hist, r->intra_types, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
1477  memset(r->intra_types, -1, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
1478 
 // The loop filter lags two rows behind decoding.
1479  if(r->loop_filter && s->mb_y >= 2)
1480  r->loop_filter(r, s->mb_y - 2);
1481 
1482  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
1483  ff_thread_report_progress(&s->current_picture_ptr->tf,
1484  s->mb_y - 2, 0);
1485 
1486  }
1487  if(s->mb_x == s->resync_mb_x)
1488  s->first_slice_line=0;
1489  s->mb_num_left--;
1490  }
1491  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);
1492 
1493  return s->mb_y == s->mb_height;
1494 }
1495 
1496 /** @} */ // reconstruction group end
1497 
1498 /**
1499  * Initialize decoder.
1500  */
 /*
  * ff_rv34_decode_init() — common RV30/RV40 decoder initialization:
  * sets up the mpegvideo context (H.263-style, YUV420P, B-frame reordering),
  * the H.264 prediction function tables and the per-decoder buffers, and
  * runs the one-time static table init.
  * NOTE(review): the signature line (source line 1501,
  * `av_cold int ff_rv34_decode_init(AVCodecContext *avctx)`) was lost in
  * the documentation extraction.
  */
1503  static AVOnce init_static_once = AV_ONCE_INIT;
1504  RV34DecContext *r = avctx->priv_data;
1505  MpegEncContext *s = &r->s;
1506  int ret;
1507 
1508  ff_mpv_decode_init(s, avctx);
1509  s->out_format = FMT_H263;
1510 
1511  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
 // RV30/40 reorder B-frames, so output is delayed by one frame.
1512  avctx->has_b_frames = 1;
1513  s->low_delay = 0;
1514 
1515  if ((ret = ff_mpv_common_init(s)) < 0)
1516  return ret;
1517 
 // RV40 reuses the H.264 spatial predictors (8-bit, chroma format 1).
1518  ff_h264_pred_init(&r->h, AV_CODEC_ID_RV40, 8, 1);
1519 
1520  if ((ret = rv34_decoder_alloc(r)) < 0) {
1521  ff_mpv_common_end(&r->s);
1522  return ret;
1523  }
1524 
1525  ff_thread_once(&init_static_once, rv34_init_tables);
1526 
1527  return 0;
1528 }
1529 
 /*
  * ff_rv34_decode_update_thread_context() — frame-threading state copy from
  * the source decoder context to this one, reallocating buffers if the
  * dimensions changed.
  * NOTE(review): the signature line (source line 1530, per the doc index
  * `int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const
  * AVCodecContext *src)`) was lost in the documentation extraction.
  */
1532  RV34DecContext *r = dst->priv_data, *r1 = src->priv_data;
1533  MpegEncContext * const s = &r->s, * const s1 = &r1->s;
1534  int err;
1535 
1536  if (dst == src || !s1->context_initialized)
1537  return 0;
1538 
1539  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
1540  s->height = s1->height;
1541  s->width = s1->width;
1542  if ((err = ff_mpv_common_frame_size_change(s)) < 0)
1543  return err;
1544  if ((err = rv34_decoder_realloc(r)) < 0)
1545  return err;
1546  }
1547 
1548  r->cur_pts = r1->cur_pts;
1549  r->last_pts = r1->last_pts;
1550  r->next_pts = r1->next_pts;
1551 
1552  memset(&r->si, 0, sizeof(r->si));
1553 
1554  // Do not call ff_mpeg_update_thread_context on a partially initialized
1555  // decoder context.
 // NOTE(review): this check is unreachable — the guard at the top of the
 // function already returned 0 when !s1->context_initialized.
1556  if (!s1->context_initialized)
1557  return 0;
1558 
1559  return ff_mpeg_update_thread_context(dst, src);
1560 }
1561 
1562 static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, int slice_count, int buf_size)
1563 {
1564  if (n < slice_count) {
1565  return AV_RL32(buf + n*8 - 4) == 1 ? AV_RL32(buf + n*8) : AV_RB32(buf + n*8);
1566  } else
1567  return buf_size;
1568 }
1569 
/**
 * Finish decoding the current picture and, if one is ready for output,
 * reference it into `pict`.  B-frames are output immediately; otherwise the
 * previous reference frame is output (one-frame reordering delay).
 *
 * @return 1 if a picture was returned, 0 if not, negative error code on failure
 */
1570 static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
1571 {
1572  RV34DecContext *r = avctx->priv_data;
1573  MpegEncContext *s = &r->s;
1574  int got_picture = 0, ret;
1575 
1576  ff_er_frame_end(&s->er, NULL);
 // NOTE(review): source line 1577 is missing from this extraction —
 // likely the mpegvideo frame-end call; verify against upstream rv34.c.
1578  s->mb_num_left = 0;
1579 
1580  if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
1581  ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1582 
1583  if (s->pict_type == AV_PICTURE_TYPE_B) {
1584  if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
1585  return ret;
1586  ff_print_debug_info(s, s->current_picture_ptr, pict);
1587  ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
1588  got_picture = 1;
1589  } else if (s->last_picture_ptr) {
1590  if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
1591  return ret;
1592  ff_print_debug_info(s, s->last_picture_ptr, pict);
1593  ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG1);
1594  got_picture = 1;
1595  }
1596 
1597  return got_picture;
1598 }
1599 
1600 static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
1601 {
1602  // attempt to keep aspect during typical resolution switches
1603  if (!sar.num)
1604  sar = (AVRational){1, 1};
1605 
1606  sar = av_mul_q(sar, av_mul_q((AVRational){new_h, new_w}, (AVRational){old_w, old_h}));
1607  return sar;
1608 }
1609 
 /*
  * ff_rv34_decode_frame() — decode one RV30/40 packet.
  * Packet layout: one byte (slice_count - 1), then slice_count 8-byte
  * offset entries (see get_slice_offset()), then the slice payloads.
  * NOTE(review): the first signature line (source line 1610,
  * `int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict,` judging
  * by the doc index) was lost in the documentation extraction.
  */
1611  int *got_picture_ptr, AVPacket *avpkt)
1612 {
1613  const uint8_t *buf = avpkt->data;
1614  int buf_size = avpkt->size;
1615  RV34DecContext *r = avctx->priv_data;
1616  MpegEncContext *s = &r->s;
1617  SliceInfo si;
1618  int i, ret;
1619  int slice_count;
1620  const uint8_t *slices_hdr = NULL;
1621  int last = 0;
1622  int faulty_b = 0;
1623  int offset;
1624 
1625  /* no supplementary picture */
1626  if (buf_size == 0) {
1627  /* special case for last picture */
1628  if (s->next_picture_ptr) {
1629  if ((ret = av_frame_ref(pict, s->next_picture_ptr->f)) < 0)
1630  return ret;
1631  s->next_picture_ptr = NULL;
1632 
1633  *got_picture_ptr = 1;
1634  }
1635  return 0;
1636  }
1637 
 // Strip the slice table; slices_hdr points at the first offset field.
1638  slice_count = (*buf++) + 1;
1639  slices_hdr = buf + 4;
1640  buf += 8 * slice_count;
1641  buf_size -= 1 + 8 * slice_count;
1642 
1643  offset = get_slice_offset(avctx, slices_hdr, 0, slice_count, buf_size);
1644  //parse first slice header to check whether this frame can be decoded
1645  if(offset < 0 || offset > buf_size){
1646  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1647  return AVERROR_INVALIDDATA;
1648  }
1649  init_get_bits(&s->gb, buf+offset, (buf_size-offset)*8);
1650  if(r->parse_slice_header(r, &r->s.gb, &si) < 0 || si.start){
1651  av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
1652  return AVERROR_INVALIDDATA;
1653  }
 // A B-frame without decoded references cannot be reconstructed; flag it
 // now but only bail out after the skip_frame checks below.
1654  if ((!s->last_picture_ptr || !s->last_picture_ptr->f->data[0]) &&
1655  si.type == AV_PICTURE_TYPE_B) {
1656  av_log(avctx, AV_LOG_ERROR, "Invalid decoder state: B-frame without "
1657  "reference data.\n");
1658  faulty_b = 1;
1659  }
1660  if( (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B)
1661  || (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I)
1662  || avctx->skip_frame >= AVDISCARD_ALL)
1663  return avpkt->size;
1664 
1665  /* first slice */
1666  if (si.start == 0) {
1667  if (s->mb_num_left > 0 && s->current_picture_ptr) {
1668  av_log(avctx, AV_LOG_ERROR, "New frame but still %d MB left.\n",
1669  s->mb_num_left);
1670  if (!s->context_reinit)
1671  ff_er_frame_end(&s->er, NULL);
 // NOTE(review): source line 1672 is missing from this extraction —
 // likely the mpegvideo frame-end call for the abandoned picture.
1673  }
1674 
1675  if (s->width != si.width || s->height != si.height || s->context_reinit) {
1676  int err;
1677 
1678  av_log(s->avctx, AV_LOG_WARNING, "Changing dimensions to %dx%d\n",
1679  si.width, si.height);
1680 
1681  if (av_image_check_size(si.width, si.height, 0, s->avctx))
1682  return AVERROR_INVALIDDATA;
1683 
1684  s->avctx->sample_aspect_ratio = update_sar(
1685  s->width, s->height, s->avctx->sample_aspect_ratio,
1686  si.width, si.height);
1687  s->width = si.width;
1688  s->height = si.height;
1689 
1690  err = ff_set_dimensions(s->avctx, s->width, s->height);
1691  if (err < 0)
1692  return err;
1693  if ((err = ff_mpv_common_frame_size_change(s)) < 0)
1694  return err;
1695  if ((err = rv34_decoder_realloc(r)) < 0)
1696  return err;
1697  }
1698  if (faulty_b)
1699  return AVERROR_INVALIDDATA;
1700  s->pict_type = si.type ? si.type : AV_PICTURE_TYPE_I;
1701  if (ff_mpv_frame_start(s, s->avctx) < 0)
1702  return -1;
 // NOTE(review): source line 1703 is missing from this extraction —
 // verify against upstream rv34.c.
 // Lazily allocate the scratch area for B-frame bidirectional MC.
1704  if (!r->tmp_b_block_base) {
1705  int i;
1706 
1707  r->tmp_b_block_base = av_malloc(s->linesize * 48);
1708  if (!r->tmp_b_block_base)
1709  return AVERROR(ENOMEM);
1710  for (i = 0; i < 2; i++)
1711  r->tmp_b_block_y[i] = r->tmp_b_block_base
1712  + i * 16 * s->linesize;
1713  for (i = 0; i < 4; i++)
1714  r->tmp_b_block_uv[i] = r->tmp_b_block_base + 32 * s->linesize
1715  + (i >> 1) * 8 * s->uvlinesize
1716  + (i & 1) * 16;
1717  }
 // Derive the B-frame interpolation weights from PTS distances.
1718  r->cur_pts = si.pts;
1719  if (s->pict_type != AV_PICTURE_TYPE_B) {
1720  r->last_pts = r->next_pts;
1721  r->next_pts = r->cur_pts;
1722  } else {
1723  int refdist = GET_PTS_DIFF(r->next_pts, r->last_pts);
1724  int dist0 = GET_PTS_DIFF(r->cur_pts, r->last_pts);
1725  int dist1 = GET_PTS_DIFF(r->next_pts, r->cur_pts);
1726 
1727  if(!refdist){
1728  r->mv_weight1 = r->mv_weight2 = r->weight1 = r->weight2 = 8192;
1729  r->scaled_weight = 0;
1730  }else{
1731  if (FFMAX(dist0, dist1) > refdist)
1732  av_log(avctx, AV_LOG_TRACE, "distance overflow\n");
1733 
1734  r->mv_weight1 = (dist0 << 14) / refdist;
1735  r->mv_weight2 = (dist1 << 14) / refdist;
1736  if((r->mv_weight1|r->mv_weight2) & 511){
1737  r->weight1 = r->mv_weight1;
1738  r->weight2 = r->mv_weight2;
1739  r->scaled_weight = 0;
1740  }else{
1741  r->weight1 = r->mv_weight1 >> 9;
1742  r->weight2 = r->mv_weight2 >> 9;
1743  r->scaled_weight = 1;
1744  }
1745  }
1746  }
1747  s->mb_x = s->mb_y = 0;
1748  ff_thread_finish_setup(s->avctx);
1749  } else if (s->context_reinit) {
1750  av_log(s->avctx, AV_LOG_ERROR, "Decoder needs full frames to "
1751  "reinitialize (start MB is %d).\n", si.start);
1752  return AVERROR_INVALIDDATA;
1753  } else if (HAVE_THREADS &&
1754  (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
1755  av_log(s->avctx, AV_LOG_ERROR, "Decoder needs full frames in frame "
1756  "multithreading mode (start MB is %d).\n", si.start);
1757  return AVERROR_INVALIDDATA;
1758  }
1759 
 // Decode slice by slice; each slice's end is the next slice's start.
1760  for(i = 0; i < slice_count; i++){
1761  int offset = get_slice_offset(avctx, slices_hdr, i , slice_count, buf_size);
1762  int offset1 = get_slice_offset(avctx, slices_hdr, i+1, slice_count, buf_size);
1763  int size;
1764 
1765  if(offset < 0 || offset > offset1 || offset1 > buf_size){
1766  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1767  break;
1768  }
1769  size = offset1 - offset;
1770 
1771  r->si.end = s->mb_width * s->mb_height;
1772  s->mb_num_left = r->s.mb_x + r->s.mb_y*r->s.mb_width - r->si.start;
1773 
1774  if(i+1 < slice_count){
1775  int offset2 = get_slice_offset(avctx, slices_hdr, i+2, slice_count, buf_size);
1776  if (offset2 < offset1 || offset2 > buf_size) {
1777  av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
1778  break;
1779  }
 // If the next slice header is broken, fold it into this slice.
1780  init_get_bits(&s->gb, buf+offset1, (buf_size-offset1)*8);
1781  if(r->parse_slice_header(r, &r->s.gb, &si) < 0){
1782  size = offset2 - offset;
1783  }else
1784  r->si.end = si.start;
1785  }
1786  av_assert0 (size >= 0 && size <= buf_size - offset);
1787  last = rv34_decode_slice(r, r->si.end, buf + offset, size);
1788  if(last)
1789  break;
1790  }
1791 
1792  if (s->current_picture_ptr) {
1793  if (last) {
1794  if(r->loop_filter)
1795  r->loop_filter(r, s->mb_height - 1);
1796 
1797  ret = finish_frame(avctx, pict);
1798  if (ret < 0)
1799  return ret;
1800  *got_picture_ptr = ret;
1801  } else if (HAVE_THREADS &&
1802  (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
 // NOTE(review): "unfished" is a typo for "unfinished" in this runtime
 // log message (present upstream); left untouched here.
1803  av_log(avctx, AV_LOG_INFO, "marking unfished frame as finished\n");
1804  /* always mark the current frame as finished, frame-mt supports
1805  * only complete frames */
1806  ff_er_frame_end(&s->er, NULL);
 // NOTE(review): source line 1807 is missing from this extraction —
 // likely the mpegvideo frame-end call; verify against upstream rv34.c.
1808  s->mb_num_left = 0;
1809  ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
1810  return AVERROR_INVALIDDATA;
1811  }
1812  }
1813 
1814  return avpkt->size;
1815 }
1816 
 /*
  * ff_rv34_decode_end() — decoder teardown: release the shared mpegvideo
  * context state.
  * NOTE(review): the signature line (source line 1817, per the doc index
  * `av_cold int ff_rv34_decode_end(AVCodecContext *avctx)`) and interior
  * line 1822 (presumably the call freeing the rv34-specific buffers) were
  * lost in the documentation extraction — verify against upstream rv34.c.
  */
1819  RV34DecContext *r = avctx->priv_data;
1820 
1821  ff_mpv_common_end(&r->s);
1823 
1824  return 0;
1825 }
RV34DecContext
decoder context
Definition: rv34.h:86
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:682
A
#define A(x)
Definition: vpx_arith.h:28
IS_8X8
#define IS_8X8(a)
Definition: mpegutils.h:77
rv34_mb_type_to_lavc
static const int rv34_mb_type_to_lavc[12]
translation of RV30/40 macroblock types to lavc ones
Definition: rv34.c:59
HOR_PRED8x8
#define HOR_PRED8x8
Definition: h264pred.h:69
MB_TYPE_L0
#define MB_TYPE_L0
Definition: mpegutils.h:55
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
rv34_qscale_tab
static const uint16_t rv34_qscale_tab[32]
This table is used for dequantizing.
Definition: rv34data.h:84
rv34_output_intra
static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
Definition: rv34.c:1102
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:695
r
const char * r
Definition: vf_curves.c:127
ff_rv34_decode_end
av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
Definition: rv34.c:1817
AVERROR
#define AVERROR(e) — macro that converts a POSIX error value (e.g. ENOMEM) into a negative FFmpeg error code, as used by the error returns in this file. (The extracted description for this index entry was garbled; see libavutil/error.h.)
DC_PRED8x8
#define DC_PRED8x8
Definition: h264pred.h:68
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
Definition: mpegvideo_dec.c:467
rv34_pred_mv_rv3
static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
motion vector prediction - RV3 version
Definition: rv34.c:610
mem_internal.h
DC_128_PRED
@ DC_128_PRED
Definition: vp9.h:58
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:251
thread.h
rv34_table_inter_secondpat
static const uint8_t rv34_table_inter_secondpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:3737
ittrans16
static const int ittrans16[4]
mapping of RV30/40 intra 16x16 prediction types to standard H.264 types
Definition: rv34.c:973
num_mvs
static const int num_mvs[RV34_MB_TYPES]
number of motion vectors in each macroblock type
Definition: rv34.c:860
MB_TYPE_16x8
#define MB_TYPE_16x8
Definition: mpegutils.h:43
chroma_coeffs
static const int chroma_coeffs[3]
Definition: rv34.c:656
ff_rv34_get_start_offset
int ff_rv34_get_start_offset(GetBitContext *gb, int mb_size)
Decode starting slice position.
Definition: rv34.c:337
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:375
step
Cross-reference for `step` (from doc/rate_distortion.txt); the extracted summary text for this index entry was garbled beyond recovery.
Definition: rate_distortion.txt:58
ff_rv34_decode_update_thread_context
int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: rv34.c:1530
AVPacket::data
uint8_t * data
Definition: packet.h:524
DC_PRED
@ DC_PRED
Definition: vp9.h:48
table
static const uint16_t table[]
Definition: prosumer.c:205
rv34_decoder_realloc
static int rv34_decoder_realloc(RV34DecContext *r)
Definition: rv34.c:1412
VERT_LEFT_PRED
@ VERT_LEFT_PRED
Definition: vp9.h:53
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:42
check_slice_end
static int check_slice_end(RV34DecContext *r, MpegEncContext *s)
Definition: rv34.c:1356
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:464
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:821
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:843
chroma_mc
#define chroma_mc(a)
Definition: vc1dsp.c:786
mpegvideo.h
MB_TYPE_L1
#define MB_TYPE_L1
Definition: mpegutils.h:56
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Picture
Picture.
Definition: mpegpicture.h:46
rv34_set_deblock_coef
static int rv34_set_deblock_coef(RV34DecContext *r)
Definition: rv34.c:1178
mpegutils.h
MB_TYPE_INTRA16x16
#define MB_TYPE_INTRA16x16
Definition: mpegutils.h:40
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:514
thread.h
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
avail_indexes
static const uint8_t avail_indexes[4]
availability index for subblocks
Definition: rv34.c:465
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
golomb.h
exp golomb vlc stuff
NUM_INTRA_TABLES
#define NUM_INTRA_TABLES
Definition: rv34vlc.h:32
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
adjust_pred16
static int adjust_pred16(int itype, int up, int left)
Definition: rv34.c:1007
RV34_MB_B_FORWARD
@ RV34_MB_B_FORWARD
B-frame macroblock, forward prediction.
Definition: rv34.h:49
rv34_decoder_alloc
static int rv34_decoder_alloc(RV34DecContext *r)
Definition: rv34.c:1384
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1819
VERT_PRED
@ VERT_PRED
Definition: vp9.h:46
rv34_pred_mv
static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
motion vector prediction
Definition: rv34.c:474
GetBitContext
Definition: get_bits.h:108
RV34VLC::first_pattern
const VLCElem * first_pattern[4]
VLCs used for decoding coefficients in the first subblock.
Definition: rv34.h:68
DIAG_DOWN_RIGHT_PRED
@ DIAG_DOWN_RIGHT_PRED
Definition: vp9.h:50
rv34_decode_block
static int rv34_decode_block(int16_t *dst, GetBitContext *gb, const RV34VLC *rvlc, int fc, int sc, int q_dc, int q_ac1, int q_ac2)
Decode coefficients for 4x4 block.
Definition: rv34.c:293
RV34_MB_B_DIRECT
@ RV34_MB_B_DIRECT
Bidirectionally predicted B-frame macroblock, no motion vectors.
Definition: rv34.h:52
val
static double val(void *priv, double ch)
Definition: aeval.c:78
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:460
rv34_count_ones
static const uint8_t rv34_count_ones[16]
number of ones in nibble minus one
Definition: rv34data.h:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
rv34_table_intra_firstpat
static const uint8_t rv34_table_intra_firstpat[NUM_INTRA_TABLES][4][FIRSTBLK_VLC_SIZE]
Definition: rv34vlc.h:940
rv34data.h
quant
static const uint8_t quant[64]
Definition: vmixdec.c:71
C
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
Definition: writing_filters.txt:58
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:783
avassert.h
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
mpegvideodec.h
AV_LOG_TRACE
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:206
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
HOR_PRED
@ HOR_PRED
Definition: vp9.h:47
av_cold
#define av_cold
Definition: attributes.h:90
ff_rv34_decode_init
av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
Initialize decoder.
Definition: rv34.c:1501
rv34_pred_4x4_block
static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
Perform 4x4 intra prediction.
Definition: rv34.c:980
rv34_decode_intra_macroblock
static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
Definition: rv34.c:1316
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:573
ZERO8x2
static void ZERO8x2(void *dst, int stride)
Definition: rv34.c:52
mask
static const uint16_t mask[17]
Definition: lzw.c:38
RV34VLC
VLC tables used by the decoder.
Definition: rv34.h:65
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:723
ff_er_frame_end
void ff_er_frame_end(ERContext *s, int *decode_error_flags)
Indicate that a frame has finished decoding and perform error concealment in case it has been enabled...
Definition: error_resilience.c:891
ff_mpv_common_frame_size_change
int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo_dec.c:190
width
#define width
rv34_mc_1mv
static void rv34_mc_1mv(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir)
Definition: rv34.c:789
rv34_decode_inter_macroblock
static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
Definition: rv34.c:1208
intra_vlcs
static RV34VLC intra_vlcs[NUM_INTRA_TABLES]
Definition: rv34.c:75
s
#define s(width, name)
Definition: cbs_vp9.c:198
IS_16X8
#define IS_16X8(a)
Definition: mpegutils.h:75
s1
#define s1
Definition: regdef.h:38
VERT_LEFT_PRED_RV40_NODOWN
#define VERT_LEFT_PRED_RV40_NODOWN
Definition: h264pred.h:56
RV34VLC::cbp
VLC cbp[2][4]
VLCs used for coded block patterns decoding.
Definition: rv34.h:67
CBPPAT_VLC_SIZE
#define CBPPAT_VLC_SIZE
Definition: rv34vlc.h:35
ff_mpeg_er_frame_start
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:48
calc_add_mv
static int calc_add_mv(RV34DecContext *r, int dir, int val)
Calculate motion vector component that should be added for direct blocks.
Definition: rv34.c:528
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:150
LEFT_DC_PRED
@ LEFT_DC_PRED
Definition: vp9.h:56
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
B
#define B
Definition: huffyuv.h:42
decode.h
IS_SKIP
#define IS_SKIP(a)
Definition: mpegutils.h:69
CBP_VLC_SIZE
#define CBP_VLC_SIZE
Definition: rv34vlc.h:36
IS_INTRA
#define IS_INTRA(x, y)
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
finish_frame
static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
Definition: rv34.c:1570
rv34_mb_max_sizes
static const uint16_t rv34_mb_max_sizes[6]
maximum number of macroblocks for each of the possible slice offset sizes
Definition: rv34data.h:106
decode_coeff
static void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb, const VLCElem *vlc, int q)
Get one coefficient value from the bitstream and store it.
Definition: rv34.c:223
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:65
MB_TYPE_8x16
#define MB_TYPE_8x16
Definition: mpegutils.h:44
TOP_DC_PRED8x8
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
RV34VLC::second_pattern
const VLCElem * second_pattern[2]
VLCs used for decoding coefficients in the subblocks 2 and 3.
Definition: rv34.h:69
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:219
threadframe.h
rv34_inter_coeff
static const uint8_t rv34_inter_coeff[NUM_INTER_TABLES][COEFF_VLC_SIZE]
Definition: rv34vlc.h:4024
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
RV34VLC::cbppattern
const VLCElem * cbppattern[2]
VLCs used for pattern of coded block patterns decoding.
Definition: rv34.h:66
NULL
#define NULL
Definition: coverity.c:32
GET_PTS_DIFF
#define GET_PTS_DIFF(a, b)
Definition: rv34.c:523
rv34_decode_slice
static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t *buf, int buf_size)
Definition: rv34.c:1419
rv34_init_tables
static av_cold void rv34_init_tables(void)
Initialize all tables.
Definition: rv34.c:135
RV34_MB_SKIP
@ RV34_MB_SKIP
Skipped block.
Definition: rv34.h:51
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
decode_subblock
static void decode_subblock(int16_t *dst, int code, const int is_block2, GetBitContext *gb, const VLCElem *vlc, int q)
Decode 2x2 subblock of coefficients.
Definition: rv34.c:244
COEFF_VLC_SIZE
#define COEFF_VLC_SIZE
Definition: rv34vlc.h:39
rv34_table_intra_cbppat
static const uint8_t rv34_table_intra_cbppat[NUM_INTRA_TABLES][2][CBPPAT_VLC_SIZE]
Definition: rv34vlc.h:42
RV34VLC::third_pattern
const VLCElem * third_pattern[2]
VLCs used for decoding coefficients in the last subblock.
Definition: rv34.h:70
MB_TYPE_8x8
#define MB_TYPE_8x8
Definition: mpegutils.h:45
SliceInfo::type
int type
slice type (intra, inter)
Definition: rv34.h:76
ER_MB_ERROR
#define ER_MB_ERROR
Definition: error_resilience.h:37
decode_subblock3
static void decode_subblock3(int16_t *dst, int code, GetBitContext *gb, const VLCElem *vlc, int q_dc, int q_ac1, int q_ac2)
Definition: rv34.c:270
V
#define V
Definition: avdct.c:31
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
mathops.h
VERT_PRED8x8
#define VERT_PRED8x8
Definition: h264pred.h:70
qpeldsp.h
rv34_gen_vlc_ext
static av_cold void rv34_gen_vlc_ext(const uint8_t *bits, int size, VLC *vlc, const uint8_t *syms, int *offset)
Generate VLC from codeword lengths.
Definition: rv34.c:94
rv34_table_intra_secondpat
static const uint8_t rv34_table_intra_secondpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:2074
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:652
MAX_VLC_SIZE
#define MAX_VLC_SIZE
Definition: rv34vlc.h:40
rv34.h
FF_MPV_QSCALE_TYPE_MPEG1
#define FF_MPV_QSCALE_TYPE_MPEG1
Definition: mpegvideodec.h:40
AVOnce
#define AVOnce
Definition: thread.h:202
rv34_decode_mv
static int rv34_decode_mv(RV34DecContext *r, int block_type)
Decode motion vector differences and perform motion vector reconstruction and motion compensation.
Definition: rv34.c:866
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
RV34_MB_P_8x8
@ RV34_MB_P_8x8
P-frame macroblock, 8x8 motion compensation partitions.
Definition: rv34.h:48
rv34_table_intra_thirdpat
static const uint8_t rv34_table_intra_thirdpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:2177
VLC::table_allocated
int table_allocated
Definition: vlc.h:39
rv34_mc_2mv_skip
static void rv34_mc_2mv_skip(RV34DecContext *r)
Definition: rv34.c:839
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:218
f
f
Definition: af_crystalizer.c:121
rv34_cbp_code
static const uint8_t rv34_cbp_code[16]
values used to reconstruct coded block pattern
Definition: rv34data.h:42
is_mv_diff_gt_3
static int is_mv_diff_gt_3(int16_t(*motion_val)[2], int step)
Definition: rv34.c:1166
AVPacket::size
int size
Definition: packet.h:525
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:384
RV34_MB_B_BACKWARD
@ RV34_MB_B_BACKWARD
B-frame macroblock, backward prediction.
Definition: rv34.h:50
ff_rv34_decode_frame
int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict, int *got_picture_ptr, AVPacket *avpkt)
Definition: rv34.c:1610
rectangle.h
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:600
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
update_sar
static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
Definition: rv34.c:1600
FIRSTBLK_VLC_SIZE
#define FIRSTBLK_VLC_SIZE
Definition: rv34vlc.h:37
get_interleaved_se_golomb
static int get_interleaved_se_golomb(GetBitContext *gb)
Definition: golomb.h:301
RV34_MB_P_8x16
@ RV34_MB_P_8x16
P-frame macroblock, 8x16 motion compensation partitions.
Definition: rv34.h:54
size
int size
Definition: twinvq_data.h:10344
VERT_RIGHT_PRED
@ VERT_RIGHT_PRED
Definition: vp9.h:51
VLCElem
Definition: vlc.h:32
decode_subblock1
static void decode_subblock1(int16_t *dst, int code, GetBitContext *gb, const VLCElem *vlc, int q)
Decode a single coefficient.
Definition: rv34.c:263
rv34_decode_cbp
static int rv34_decode_cbp(GetBitContext *gb, const RV34VLC *vlc, int table)
Decode coded block pattern.
Definition: rv34.c:190
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
DC_128_PRED8x8
#define DC_128_PRED8x8
Definition: h264pred.h:76
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:50
rv34_inter_cbppat
static const uint8_t rv34_inter_cbppat[NUM_INTER_TABLES][CBPPAT_VLC_SIZE]
Definition: rv34vlc.h:2305
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:323
height
#define height
SliceInfo::pts
int pts
frame timestamp
Definition: rv34.h:82
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
OTHERBLK_VLC_SIZE
#define OTHERBLK_VLC_SIZE
Definition: rv34vlc.h:38
IS_INTRA16x16
#define IS_INTRA16x16(a)
Definition: mpegutils.h:64
ff_vlc_init_sparse
int ff_vlc_init_sparse(VLC *vlc, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Build VLC decoding tables suitable for use with get_vlc2().
Definition: vlc.c:250
Picture::motion_val
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:54
PLANE_PRED8x8
#define PLANE_PRED8x8
Definition: h264pred.h:71
Y
#define Y
Definition: boxblur.h:37
rv34_output_i16x16
static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
Definition: rv34.c:1039
RV34_MB_TYPE_INTRA16x16
@ RV34_MB_TYPE_INTRA16x16
Intra macroblock with DCs in a separate 4x4 block.
Definition: rv34.h:46
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
rv34_pred_mv_b
static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
motion vector prediction for B-frames
Definition: rv34.c:558
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1593
rv34_table_inter_thirdpat
static const uint8_t rv34_table_inter_thirdpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:3880
DIAG_DOWN_LEFT_PRED_RV40_NODOWN
#define DIAG_DOWN_LEFT_PRED_RV40_NODOWN
Definition: h264pred.h:54
SliceInfo::height
int height
coded height
Definition: rv34.h:81
MB_TYPE_L0L1
#define MB_TYPE_L0L1
Definition: mpegutils.h:57
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
AV_CODEC_ID_RV40
@ AV_CODEC_ID_RV40
Definition: codec_id.h:121
part_sizes_h
static const uint8_t part_sizes_h[RV34_MB_TYPES]
macroblock partition height in 8x8 blocks
Definition: rv34.c:462
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:371
rv34_table_inter_firstpat
static const uint8_t rv34_table_inter_firstpat[NUM_INTER_TABLES][2][FIRSTBLK_VLC_SIZE]
Definition: rv34vlc.h:2936
internal.h
ff_mpv_decode_init
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:45
HOR_UP_PRED_RV40_NODOWN
#define HOR_UP_PRED_RV40_NODOWN
Definition: h264pred.h:55
rv34_mc_2mv
static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
Definition: rv34.c:820
rv34_table_intra_cbp
static const uint8_t rv34_table_intra_cbp[NUM_INTRA_TABLES][8][CBP_VLC_SIZE]
Definition: rv34vlc.h:886
RV34_MB_TYPE_INTRA
@ RV34_MB_TYPE_INTRA
Intra macroblock.
Definition: rv34.h:45
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
SUINT
#define SUINT
Definition: dct32_template.c:30
RV34_MB_TYPES
@ RV34_MB_TYPES
Definition: rv34.h:57
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
table_data
static VLCElem table_data[117592]
Definition: rv34.c:84
rv34_quant_to_vlc_set
static const uint8_t rv34_quant_to_vlc_set[2][32]
tables used to translate a quantizer value into a VLC set for decoding The first table is used for in...
Definition: rv34data.h:95
SliceInfo
essential slice information
Definition: rv34.h:75
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
get_slice_offset
static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, int slice_count, int buf_size)
Definition: rv34.c:1562
mod
static int mod(int a, int b)
Modulo operation with only positive remainders.
Definition: vf_v360.c:750
LEFT_DC_PRED8x8
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
VLC::bits
int bits
Definition: vlc.h:37
mid_pred
#define mid_pred
Definition: mathops.h:98
ret
ret
Definition: filter_design.txt:187
INVALID_VLC
#define INVALID_VLC
Definition: golomb.h:37
RV34VLC::coefficient
const VLCElem * coefficient
VLCs used for decoding big coefficients.
Definition: rv34.h:71
rv4_weight
static void rv4_weight(RV34DecContext *r)
Definition: rv34.c:798
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
U
#define U(x)
Definition: vpx_arith.h:37
rv34_inter_cbp
static const uint8_t rv34_inter_cbp[NUM_INTER_TABLES][4][CBP_VLC_SIZE]
Definition: rv34vlc.h:2890
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:62
AVCodecContext
main external API structure.
Definition: avcodec.h:445
VLC_INIT_STATIC_OVERLONG
#define VLC_INIT_STATIC_OVERLONG
Definition: vlc.h:183
SliceInfo::start
int start
Definition: rv34.h:79
Picture::mb_type
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:57
ThreadFrame
Definition: threadframe.h:27
rv34_decode_inter_mb_header
static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode inter macroblock header and return CBP in case of success, -1 otherwise.
Definition: rv34.c:396
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:437
HOR_UP_PRED
@ HOR_UP_PRED
Definition: vp9.h:54
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
rv34_intra_coeff
static const uint8_t rv34_intra_coeff[NUM_INTRA_TABLES][COEFF_VLC_SIZE]
Definition: rv34vlc.h:2281
error_resilience.h
part_sizes_w
static const uint8_t part_sizes_w[RV34_MB_TYPES]
macroblock partition width in 8x8 blocks
Definition: rv34.c:459
VLC
Definition: vlc.h:36
ittrans
static const int ittrans[9]
mapping of RV30/40 intra prediction types to standard H.264 types
Definition: rv34.c:967
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:826
rv34_chroma_quant
static const uint8_t rv34_chroma_quant[2][32]
quantizer values used for AC and DC coefficients in chroma blocks
Definition: rv34data.h:74
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:452
VLC::table
VLCElem * table
Definition: vlc.h:38
rv34_decode_intra_mb_header
static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode intra macroblock header and return CBP in case of success, -1 otherwise.
Definition: rv34.c:361
HOR_DOWN_PRED
@ HOR_DOWN_PRED
Definition: vp9.h:52
rv34_mb_bits_sizes
static const uint8_t rv34_mb_bits_sizes[6]
bits needed to code the slice offset for the given size
Definition: rv34data.h:111
IS_8X16
#define IS_8X16(a)
Definition: mpegutils.h:76
rv34_process_block
static void rv34_process_block(RV34DecContext *r, uint8_t *pdst, int stride, int fc, int sc, int q_dc, int q_ac)
Definition: rv34.c:1023
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
RV34_MB_P_MIX16x16
@ RV34_MB_P_MIX16x16
P-frame macroblock with DCs in a separate 4x4 block, one motion vector.
Definition: rv34.h:56
rv34vlc.h
VLC::table_size
int table_size
Definition: vlc.h:39
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
mem.h
rv34_mc
static void rv34_mc(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir, const int thirdpel, int weighted, qpel_mc_func(*qpel_mc)[16], h264_chroma_mc_func(*chroma_mc))
generic motion compensation function
Definition: rv34.c:673
ER_MB_END
#define ER_MB_END
Definition: error_resilience.h:38
MB_TYPE_SEPARATE_DC
#define MB_TYPE_SEPARATE_DC
Definition: rv34.h:38
RV34_MB_P_16x8
@ RV34_MB_P_16x8
P-frame macroblock, 16x8 motion compensation partitions.
Definition: rv34.h:53
TOP_DC_PRED
@ TOP_DC_PRED
Definition: vp9.h:57
AVPacket
This structure stores compressed data.
Definition: packet.h:501
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
inter_vlcs
static RV34VLC inter_vlcs[NUM_INTER_TABLES]
Definition: rv34.c:75
mpeg_er.h
d
d
Definition: ffmpeg_filter.c:424
DIAG_DOWN_LEFT_PRED
@ DIAG_DOWN_LEFT_PRED
Definition: vp9.h:49
SliceInfo::width
int width
coded width
Definition: rv34.h:80
imgutils.h
MB_TYPE_DIRECT2
#define MB_TYPE_DIRECT2
Definition: mpegutils.h:47
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:474
RV34_MB_P_16x16
@ RV34_MB_P_16x16
P-frame macroblock, one motion frame.
Definition: rv34.h:47
choose_vlc_set
static RV34VLC * choose_vlc_set(int quant, int mod, int type)
Select VLC set for decoding from current quantizer, modifier and frame type.
Definition: rv34.c:349
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
RV34_MB_B_BIDIR
@ RV34_MB_B_BIDIR
Bidirectionally predicted B-frame macroblock, two motion vectors.
Definition: rv34.h:55
modulo_three_table
static const uint8_t modulo_three_table[108]
precalculated results of division by three and modulo three for values 0-107
Definition: rv34data.h:53
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
rv34_gen_vlc
static av_cold void rv34_gen_vlc(const uint8_t *bits, int size, const VLCElem **vlcp, int *offset)
Definition: rv34.c:124
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:215
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:73
rv34_decoder_free
static void rv34_decoder_free(RV34DecContext *r)
Definition: rv34.c:1372
shifts
static const uint8_t shifts[2][12]
Definition: camellia.c:178
MB_TYPE_INTRA
#define MB_TYPE_INTRA
Definition: mpegutils.h:61
NUM_INTER_TABLES
#define NUM_INTER_TABLES
Definition: rv34vlc.h:33
rv34_pred_b_vector
static void rv34_pred_b_vector(int A[2], int B[2], int C[2], int A_avail, int B_avail, int C_avail, int *mx, int *my)
Predict motion vector for B-frame macroblock.
Definition: rv34.c:538