FFmpeg
rv34.c
Go to the documentation of this file.
1 /*
2  * RV30/40 decoder common data
3  * Copyright (c) 2007 Mike Melanson, Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * RV30/40 decoder common data
25  */
26 
27 #include "libavutil/avassert.h"
28 #include "libavutil/imgutils.h"
29 #include "libavutil/internal.h"
30 #include "libavutil/mem.h"
31 #include "libavutil/mem_internal.h"
32 #include "libavutil/thread.h"
33 
34 #include "avcodec.h"
35 #include "decode.h"
36 #include "error_resilience.h"
37 #include "mpegutils.h"
38 #include "mpegvideo.h"
39 #include "mpegvideodec.h"
40 #include "golomb.h"
41 #include "mathops.h"
42 #include "mpeg_er.h"
43 #include "qpeldsp.h"
44 #include "rectangle.h"
45 #include "thread.h"
46 #include "threadprogress.h"
47 
48 #include "rv34vlc.h"
49 #include "rv34data.h"
50 #include "rv34.h"
51 
/** Clear an 8x2 motion-vector region as two 4-byte-wide, 2-row rectangles. */
static inline void ZERO8x2(void* dst, int stride)
{
    uint8_t *p = dst;

    fill_rectangle(p,     1, 2, stride, 0, 4);
    fill_rectangle(p + 4, 1, 2, stride, 0, 4);
}
57 
58 /** translation of RV30/40 macroblock types to lavc ones */
59 static const int rv34_mb_type_to_lavc[12] = {
72 };
73 
74 
76 
77 static int rv34_decode_mv(RV34DecContext *r, int block_type);
78 
79 /**
80  * @name RV30/40 VLC generating functions
81  * @{
82  */
83 
84 static VLCElem table_data[117592];
85 
/**
 * Generate VLC from codeword lengths.
 * @param bits   codeword lengths (zeroes are accepted)
 * @param size   length of input data
 * @param vlc    output VLC
 * @param syms   symbols for input codes (NULL for default ones)
 * @param offset in/out cursor into the static table_data pool; advanced
 *               past the newly built table
 */
static av_cold void rv34_gen_vlc_ext(const uint8_t *bits, int size, VLC *vlc,
                                     const uint8_t *syms, int *offset)
{
    int counts[17] = {0}, codes[17];
    uint16_t cw[MAX_VLC_SIZE];
    int maxbits;

    av_assert1(size > 0);

    /* histogram of codeword lengths */
    for (int i = 0; i < size; i++)
        counts[bits[i]]++;

    /* bits[0] is zero for some tables, i.e. syms actually starts at 1.
     * So we reset it here. The code assigned to this element is 0x00. */
    codes[0] = counts[0] = 0;
    for (int i = 0; i < 16; i++) {
        /* canonical (length-ordered) code assignment */
        codes[i+1] = (codes[i] + counts[i]) << 1;
        if (counts[i])
            maxbits = i;
        /* NOTE(review): maxbits stays uninitialized if every length is 0;
         * never the case for the static tables fed to this function. */
    }
    /* hand out consecutive codewords per length, in input order */
    for (int i = 0; i < size; i++)
        cw[i] = codes[bits[i]]++;

    vlc->table = &table_data[*offset];
    ff_vlc_init_sparse(vlc, FFMIN(maxbits, 9), size,
                       bits, 1, 1,
                       cw, 2, 2,
                       syms, !!syms, !!syms, VLC_INIT_STATIC_OVERLONG);
    *offset += vlc->table_size;
}
125 
/* Build a VLC with default symbols and expose only its VLCElem table. */
static av_cold void rv34_gen_vlc(const uint8_t *bits, int size, const VLCElem **vlcp,
                                 int *offset)
{
    VLC vlc = { 0 };
    *vlcp = vlc.table;
}
133 
/**
 * Initialize all tables.
 * Tables for all intra and inter VLC sets are packed back-to-back into the
 * static table_data pool; offset tracks the running position.
 */
static av_cold void rv34_init_tables(void)
{
    int i, j, k, offset = 0;

    /* intra VLC sets */
    for(i = 0; i < NUM_INTRA_TABLES; i++){
        for(j = 0; j < 2; j++){
                     &intra_vlcs[i].cbppattern[j], &offset);
                     &intra_vlcs[i].second_pattern[j], &offset);
                     &intra_vlcs[i].third_pattern[j], &offset);
            for(k = 0; k < 4; k++){
                         &intra_vlcs[i].cbp[j][k], rv34_cbp_code, &offset);
            }
        }
        for(j = 0; j < 4; j++){
                     &intra_vlcs[i].first_pattern[j], &offset);
        }
                 &intra_vlcs[i].coefficient, &offset);
    }

    /* inter VLC sets */
    for(i = 0; i < NUM_INTER_TABLES; i++){
                 &inter_vlcs[i].cbppattern[0], &offset);
        for(j = 0; j < 4; j++){
                     &inter_vlcs[i].cbp[0][j], rv34_cbp_code, &offset);
        }
        for(j = 0; j < 2; j++){
                     &inter_vlcs[i].first_pattern[j], &offset);
                     &inter_vlcs[i].second_pattern[j], &offset);
                     &inter_vlcs[i].third_pattern[j], &offset);
        }
                 &inter_vlcs[i].coefficient, &offset);
    }
}
181 
182 /** @} */ // vlc group
183 
184 /**
185  * @name RV30/40 4x4 block decoding functions
186  * @{
187  */
188 
189 /**
190  * Decode coded block pattern.
191  */
192 static int rv34_decode_cbp(GetBitContext *gb, const RV34VLC *vlc, int table)
193 {
194  int pattern, code, cbp=0;
195  int ones;
196  static const int cbp_masks[3] = {0x100000, 0x010000, 0x110000};
197  static const int shifts[4] = { 0, 2, 8, 10 };
198  const int *curshift = shifts;
199  int i, t, mask;
200 
201  code = get_vlc2(gb, vlc->cbppattern[table], 9, 2);
202  pattern = code & 0xF;
203  code >>= 4;
204 
205  ones = rv34_count_ones[pattern];
206 
207  for(mask = 8; mask; mask >>= 1, curshift++){
208  if(pattern & mask)
209  cbp |= get_vlc2(gb, vlc->cbp[table][ones].table, vlc->cbp[table][ones].bits, 1) << curshift[0];
210  }
211 
212  for(i = 0; i < 4; i++){
213  t = (modulo_three_table[code] >> (6 - 2*i)) & 3;
214  if(t == 1)
215  cbp |= cbp_masks[get_bits1(gb)] << i;
216  if(t == 2)
217  cbp |= cbp_masks[2] << i;
218  }
219  return cbp;
220 }
221 
222 /**
223  * Get one coefficient value from the bitstream and store it.
224  */
225 static inline void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb,
226  const VLCElem *vlc, int q)
227 {
228  if(coef){
229  if(coef == esc){
230  coef = get_vlc2(gb, vlc, 9, 2);
231  if(coef > 23){
232  coef -= 23;
233  coef = 22 + ((1 << coef) | get_bits(gb, coef));
234  }
235  coef += esc;
236  }
237  if(get_bits1(gb))
238  coef = -coef;
239  *dst = (coef*q + 8) >> 4;
240  }
241 }
242 
/**
 * Decode 2x2 subblock of coefficients.
 * Each coefficient is described by a 2-bit field; a value equal to the
 * escape (3 for the first, 2 for the rest) pulls the level from the VLC.
 */
static inline void decode_subblock(int16_t *dst, int code, const int is_block2,
                                   GetBitContext *gb, const VLCElem *vlc, int q)
{

    /* NOTE(review): flags presumably holds modulo_three_table[code]
     * (packed 2-bit fields, MSB first) — confirm against upstream. */
    decode_coeff( dst+0*4+0, (flags >> 6) , 3, gb, vlc, q);
    if(is_block2){
        /* coefficients 1 and 2 are visited in swapped (column-major) order */
        decode_coeff(dst+1*4+0, (flags >> 4) & 3, 2, gb, vlc, q);
        decode_coeff(dst+0*4+1, (flags >> 2) & 3, 2, gb, vlc, q);
    }else{
        decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q);
        decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q);
    }
    decode_coeff( dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q);
}
261 
262 /**
263  * Decode a single coefficient.
264  */
265 static inline void decode_subblock1(int16_t *dst, int code, GetBitContext *gb,
266  const VLCElem *vlc, int q)
267 {
268  int coeff = modulo_three_table[code] >> 6;
269  decode_coeff(dst, coeff, 3, gb, vlc, q);
270 }
271 
/* Decode a 2x2 subblock with separate quantizers for DC and the two AC
 * coefficient classes (used for the top-left luma/chroma subblock). */
static inline void decode_subblock3(int16_t *dst, int code, GetBitContext *gb,
                                    const VLCElem *vlc,
                                    int q_dc, int q_ac1, int q_ac2)
{

    /* NOTE(review): flags presumably holds modulo_three_table[code]
     * (packed 2-bit fields, MSB first) — confirm against upstream. */
    decode_coeff(dst+0*4+0, (flags >> 6) , 3, gb, vlc, q_dc);
    decode_coeff(dst+0*4+1, (flags >> 4) & 3, 2, gb, vlc, q_ac1);
    decode_coeff(dst+1*4+0, (flags >> 2) & 3, 2, gb, vlc, q_ac1);
    decode_coeff(dst+1*4+1, (flags >> 0) & 3, 2, gb, vlc, q_ac2);
}
283 
284 /**
285  * Decode coefficients for 4x4 block.
286  *
287  * This is done by filling 2x2 subblocks with decoded coefficients
288  * in this order (the same for subblocks and subblock coefficients):
289  * o--o
290  * /
291  * /
292  * o--o
293  */
294 
295 static int rv34_decode_block(int16_t *dst, GetBitContext *gb, const RV34VLC *rvlc,
296  int fc, int sc, int q_dc, int q_ac1, int q_ac2)
297 {
298  int code, pattern, has_ac = 1;
299 
300  code = get_vlc2(gb, rvlc->first_pattern[fc], 9, 2);
301 
302  pattern = code & 0x7;
303 
304  code >>= 3;
305 
306  if (modulo_three_table[code] & 0x3F) {
307  decode_subblock3(dst, code, gb, rvlc->coefficient, q_dc, q_ac1, q_ac2);
308  } else {
309  decode_subblock1(dst, code, gb, rvlc->coefficient, q_dc);
310  if (!pattern)
311  return 0;
312  has_ac = 0;
313  }
314 
315  if(pattern & 4){
316  code = get_vlc2(gb, rvlc->second_pattern[sc], 9, 2);
317  decode_subblock(dst + 4*0+2, code, 0, gb, rvlc->coefficient, q_ac2);
318  }
319  if(pattern & 2){ // Looks like coefficients 1 and 2 are swapped for this block
320  code = get_vlc2(gb, rvlc->second_pattern[sc], 9, 2);
321  decode_subblock(dst + 4*2+0, code, 1, gb, rvlc->coefficient, q_ac2);
322  }
323  if(pattern & 1){
324  code = get_vlc2(gb, rvlc->third_pattern[sc], 9, 2);
325  decode_subblock(dst + 4*2+2, code, 0, gb, rvlc->coefficient, q_ac2);
326  }
327  return has_ac | pattern;
328 }
329 
330 /**
331  * @name RV30/40 bitstream parsing
332  * @{
333  */
334 
335 /**
336  * Decode starting slice position.
337  * @todo Maybe replace with ff_h263_decode_mba() ?
338  */
340 {
341  int i;
342  for(i = 0; i < 5; i++)
343  if(rv34_mb_max_sizes[i] >= mb_size - 1)
344  break;
345  return rv34_mb_bits_sizes[i];
346 }
347 
/**
 * Select VLC set for decoding from current quantizer, modifier and frame type.
 * Nonzero modifiers bias the effective quantizer upwards (capped at 31),
 * picking VLC tables tuned for coarser quantization.
 */
static inline RV34VLC* choose_vlc_set(int quant, int mod, int type)
{
    if(mod == 2 && quant < 19) quant += 10;
    else if(mod && quant < 26) quant += 5;
    av_assert2(quant >= 0 && quant < 32);
}
359 
360 /**
361  * Decode intra macroblock header and return CBP in case of success, -1 otherwise.
362  */
363 static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
364 {
365  MpegEncContext *s = &r->s;
366  GetBitContext *gb = &s->gb;
367  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
368  int t;
369 
370  r->is16 = get_bits1(gb);
371  if(r->is16){
372  s->cur_pic.mb_type[mb_pos] = MB_TYPE_INTRA16x16;
373  r->block_type = RV34_MB_TYPE_INTRA16x16;
374  t = get_bits(gb, 2);
375  fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
376  r->luma_vlc = 2;
377  }else{
378  if(!r->rv30){
379  if(!get_bits1(gb))
380  av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n");
381  }
382  s->cur_pic.mb_type[mb_pos] = MB_TYPE_INTRA;
383  r->block_type = RV34_MB_TYPE_INTRA;
384  if(r->decode_intra_types(r, gb, intra_types) < 0)
385  return -1;
386  r->luma_vlc = 1;
387  }
388 
389  r->chroma_vlc = 0;
390  r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
391 
392  return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
393 }
394 
/**
 * Decode inter macroblock header and return CBP in case of success, -1 otherwise.
 *
 * Reads the macroblock type, decodes (and applies) motion vectors, then
 * decodes intra prediction modes for intra-coded inter macroblocks and
 * selects the VLC sets used for the residual.
 */
static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
{
    MpegEncContext *s = &r->s;
    GetBitContext *gb = &s->gb;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int i, t;

    r->block_type = r->decode_mb_info(r);
    if(r->block_type == -1)
        return -1;
    s->cur_pic.mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type];
    r->mb_type[mb_pos] = r->block_type;
    /* skipped MBs are recorded as 16x16 (P) or direct (B) for later prediction */
    if(r->block_type == RV34_MB_SKIP){
        if(s->pict_type == AV_PICTURE_TYPE_P)
            r->mb_type[mb_pos] = RV34_MB_P_16x16;
        if(s->pict_type == AV_PICTURE_TYPE_B)
            r->mb_type[mb_pos] = RV34_MB_B_DIRECT;
    }
    r->is16 = !!IS_INTRA16x16(s->cur_pic.mb_type[mb_pos]);
    if (rv34_decode_mv(r, r->block_type) < 0)
        return -1;
    if(r->block_type == RV34_MB_SKIP){
        /* no residual or prediction modes follow for a skipped MB */
        fill_rectangle(intra_types, 4, 4, r->intra_types_stride, 0, sizeof(intra_types[0]));
        return 0;
    }
    r->chroma_vlc = 1;
    r->luma_vlc = 0;

    if (IS_INTRA(s->cur_pic.mb_type[mb_pos])) {
        if(r->is16){
            /* one 16x16 prediction mode for the whole macroblock */
            t = get_bits(gb, 2);
            fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0]));
            r->luma_vlc = 2;
        }else{
            if(r->decode_intra_types(r, gb, intra_types) < 0)
                return -1;
            r->luma_vlc = 1;
        }
        r->chroma_vlc = 0;
        r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
    }else{
        for(i = 0; i < 16; i++)
            intra_types[(i & 3) + (i>>2) * r->intra_types_stride] = 0;
        r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
        /* P_MIX16x16 uses intra-style VLCs for its luma residual */
        if(r->mb_type[mb_pos] == RV34_MB_P_MIX16x16){
            r->is16 = 1;
            r->chroma_vlc = 1;
            r->luma_vlc = 2;
            r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 0);
        }
    }

    return rv34_decode_cbp(gb, r->cur_vlcs, r->is16);
}
452 
453 /** @} */ //bitstream functions
454 
455 /**
456  * @name motion vector related code (prediction, reconstruction, motion compensation)
457  * @{
458  */
459 
/** macroblock partition width in 8x8 blocks */
static const uint8_t part_sizes_w[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2 };

/** macroblock partition height in 8x8 blocks */
static const uint8_t part_sizes_h[RV34_MB_TYPES] = { 2, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2 };

/** availability index for subblocks; the cache is addressed with -1 (left)
 *  and -4 (top) offsets, i.e. avail_cache rows are 4 entries wide */
static const uint8_t avail_indexes[4] = { 6, 7, 10, 11 };
468 
469 /**
470  * motion vector prediction
471  *
472  * Motion prediction performed for the block by using median prediction of
473  * motion vectors from the left, top and right top blocks but in corner cases
474  * some other vectors may be used instead.
475  */
476 static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
477 {
478  MpegEncContext *s = &r->s;
479  int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
480  int A[2] = {0}, B[2], C[2];
481  int i, j;
482  int mx, my;
483  int* avail = r->avail_cache + avail_indexes[subblock_no];
484  int c_off = part_sizes_w[block_type];
485  int16_t (*motion_val)[2] = s->cur_pic.motion_val[0];
486 
487  mv_pos += (subblock_no & 1) + (subblock_no >> 1)*s->b8_stride;
488  if(subblock_no == 3)
489  c_off = -1;
490 
491  if(avail[-1]){
492  A[0] = motion_val[mv_pos-1][0];
493  A[1] = motion_val[mv_pos-1][1];
494  }
495  if(avail[-4]){
496  B[0] = motion_val[mv_pos-s->b8_stride][0];
497  B[1] = motion_val[mv_pos-s->b8_stride][1];
498  }else{
499  B[0] = A[0];
500  B[1] = A[1];
501  }
502  if(!avail[c_off-4]){
503  if(avail[-4] && (avail[-1] || r->rv30)){
504  C[0] = motion_val[mv_pos-s->b8_stride-1][0];
505  C[1] = motion_val[mv_pos-s->b8_stride-1][1];
506  }else{
507  C[0] = A[0];
508  C[1] = A[1];
509  }
510  }else{
511  C[0] = motion_val[mv_pos-s->b8_stride+c_off][0];
512  C[1] = motion_val[mv_pos-s->b8_stride+c_off][1];
513  }
514  mx = mid_pred(A[0], B[0], C[0]);
515  my = mid_pred(A[1], B[1], C[1]);
516  mx += r->dmv[dmv_no][0];
517  my += r->dmv[dmv_no][1];
518  for(j = 0; j < part_sizes_h[block_type]; j++){
519  for(i = 0; i < part_sizes_w[block_type]; i++){
520  motion_val[mv_pos + i + j*s->b8_stride][0] = mx;
521  motion_val[mv_pos + i + j*s->b8_stride][1] = my;
522  }
523  }
524 }
525 
526 #define GET_PTS_DIFF(a, b) (((a) - (b) + 8192) & 0x1FFF)
527 
528 /**
529  * Calculate motion vector component that should be added for direct blocks.
530  */
531 static int calc_add_mv(RV34DecContext *r, int dir, int val)
532 {
533  int mul = dir ? -r->mv_weight2 : r->mv_weight1;
534 
535  return (int)(val * (SUINT)mul + 0x2000) >> 14;
536 }
537 
/**
 * Predict motion vector for B-frame macroblock.
 *
 * With all three candidates present the median is taken; otherwise the
 * candidates are summed (unavailable ones are zeroed by the caller) and
 * halved when exactly two are present.
 */
static inline void rv34_pred_b_vector(int A[2], int B[2], int C[2],
                                      int A_avail, int B_avail, int C_avail,
                                      int *mx, int *my)
{
    const int navail = A_avail + B_avail + C_avail;

    if (navail == 3) {
        *mx = mid_pred(A[0], B[0], C[0]);
        *my = mid_pred(A[1], B[1], C[1]);
    } else {
        *mx = A[0] + B[0] + C[0];
        *my = A[1] + B[1] + C[1];
        if (navail == 2) {
            *mx /= 2;
            *my /= 2;
        }
    }
}
557 
/**
 * motion vector prediction for B-frames
 *
 * Candidates are taken from neighbours only when the neighbour exists and
 * carries a vector for the requested direction (forward/backward mask).
 */
static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
{
    MpegEncContext *s = &r->s;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
    int A[2] = { 0 }, B[2] = { 0 }, C[2] = { 0 };
    int has_A = 0, has_B = 0, has_C = 0;
    int mx, my;
    int i, j;
    MPVWorkPicture *cur_pic = &s->cur_pic;
    const int mask = dir ? MB_TYPE_BACKWARD_MV : MB_TYPE_FORWARD_MV;
    int type = cur_pic->mb_type[mb_pos];

    /* left neighbour */
    if((r->avail_cache[6-1] & type) & mask){
        A[0] = cur_pic->motion_val[dir][mv_pos - 1][0];
        A[1] = cur_pic->motion_val[dir][mv_pos - 1][1];
        has_A = 1;
    }
    /* top neighbour */
    if((r->avail_cache[6-4] & type) & mask){
        B[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][0];
        B[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride][1];
        has_B = 1;
    }
    /* top-right neighbour, or top-left at the right picture edge */
    if(r->avail_cache[6-4] && (r->avail_cache[6-2] & type) & mask){
        C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][0];
        C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride + 2][1];
        has_C = 1;
    }else if((s->mb_x+1) == s->mb_width && (r->avail_cache[6-5] & type) & mask){
        C[0] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][0];
        C[1] = cur_pic->motion_val[dir][mv_pos - s->b8_stride - 1][1];
        has_C = 1;
    }

    rv34_pred_b_vector(A, B, C, has_A, has_B, has_C, &mx, &my);

    mx += r->dmv[dir][0];
    my += r->dmv[dir][1];

    /* the vector covers the whole macroblock (2x2 8x8 blocks) */
    for(j = 0; j < 2; j++){
        for(i = 0; i < 2; i++){
            cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][0] = mx;
            cur_pic->motion_val[dir][mv_pos + i + j*s->b8_stride][1] = my;
        }
    }
    /* single-direction blocks have no vectors for the other direction */
    if(block_type == RV34_MB_B_BACKWARD || block_type == RV34_MB_B_FORWARD){
        ZERO8x2(cur_pic->motion_val[!dir][mv_pos], s->b8_stride);
    }
}
609 
/**
 * motion vector prediction - RV3 version
 *
 * Median prediction from left/top/top-right neighbours; the result is
 * written to both motion_val planes for the whole macroblock.
 * block_type and dir are accepted for signature parity but unused here.
 */
static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
{
    MpegEncContext *s = &r->s;
    int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
    int A[2] = {0}, B[2], C[2];
    int i, j, k;
    int mx, my;
    int* avail = r->avail_cache + avail_indexes[0];

    /* left neighbour */
    if(avail[-1]){
        A[0] = s->cur_pic.motion_val[0][mv_pos - 1][0];
        A[1] = s->cur_pic.motion_val[0][mv_pos - 1][1];
    }
    /* top neighbour (falls back to left) */
    if(avail[-4]){
        B[0] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride][0];
        B[1] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride][1];
    }else{
        B[0] = A[0];
        B[1] = A[1];
    }
    /* top-right neighbour, with top-left / left fallbacks */
    if(!avail[-4 + 2]){
        if(avail[-4] && (avail[-1])){
            C[0] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride - 1][0];
            C[1] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride - 1][1];
        }else{
            C[0] = A[0];
            C[1] = A[1];
        }
    }else{
        C[0] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride + 2][0];
        C[1] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride + 2][1];
    }
    mx = mid_pred(A[0], B[0], C[0]);
    my = mid_pred(A[1], B[1], C[1]);
    mx += r->dmv[0][0];
    my += r->dmv[0][1];
    for(j = 0; j < 2; j++){
        for(i = 0; i < 2; i++){
            for(k = 0; k < 2; k++){
                s->cur_pic.motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx;
                s->cur_pic.motion_val[k][mv_pos + i + j*s->b8_stride][1] = my;
            }
        }
    }
}
658 
659 static const int chroma_coeffs[3] = { 0, 3, 5 };
660 
/**
 * generic motion compensation function
 *
 * @param r decoder context
 * @param block_type type of the current block
 * @param xoff horizontal offset from the start of the current block
 * @param yoff vertical offset from the start of the current block
 * @param mv_off offset to the motion vector information
 * @param width width of the current partition in 8x8 blocks
 * @param height height of the current partition in 8x8 blocks
 * @param dir motion compensation direction (i.e. from the last or the next reference frame)
 * @param thirdpel motion vectors are specified in 1/3 of pixel
 * @param qpel_mc a set of functions used to perform luma motion compensation
 * @param chroma_mc a set of functions used to perform chroma motion compensation
 */
static inline void rv34_mc(RV34DecContext *r, const int block_type,
                           const int xoff, const int yoff, int mv_off,
                           const int width, const int height, int dir,
                           const int thirdpel, int weighted,
                           qpel_mc_func (*qpel_mc)[16],
{
    MpegEncContext *s = &r->s;
    uint8_t *Y, *U, *V;
    const uint8_t *srcY, *srcU, *srcV;
    int dxy, mx, my, umx, umy, lx, ly, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
    int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride + mv_off;
    int is16x16 = 1;
    int emu = 0;
    int16_t *motion_val = s->cur_pic.motion_val[dir][mv_pos];

    if(thirdpel){
        int chroma_mx, chroma_my;
        /* split 1/3-pel vectors into integer and fractional parts;
         * the (3 << 24) bias keeps division/modulo results non-negative */
        mx = (motion_val[0] + (3 << 24)) / 3 - (1 << 24);
        my = (motion_val[1] + (3 << 24)) / 3 - (1 << 24);
        lx = (motion_val[0] + (3 << 24)) % 3;
        ly = (motion_val[1] + (3 << 24)) % 3;
        chroma_mx = motion_val[0] / 2;
        chroma_my = motion_val[1] / 2;
        umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24);
        umy = (chroma_my + (3 << 24)) / 3 - (1 << 24);
        uvmx = chroma_coeffs[(chroma_mx + (3 << 24)) % 3];
        uvmy = chroma_coeffs[(chroma_my + (3 << 24)) % 3];
    }else{
        int cx, cy;
        /* quarter-pel luma, eighth-pel-derived chroma fractions */
        mx = motion_val[0] >> 2;
        my = motion_val[1] >> 2;
        lx = motion_val[0] & 3;
        ly = motion_val[1] & 3;
        cx = motion_val[0] / 2;
        cy = motion_val[1] / 2;
        umx = cx >> 2;
        umy = cy >> 2;
        uvmx = (cx & 3) << 1;
        uvmy = (cy & 3) << 1;
        //due to some flaw RV40 uses the same MC compensation routine for H2V2 and H3V3
        if(uvmx == 6 && uvmy == 6)
            uvmx = uvmy = 4;
    }

    if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
        /* wait for the referenced mb row to be finished */
        int mb_row = s->mb_y + ((yoff + my + 5 + 8 * height) >> 4);
        const ThreadProgress *p = dir ? &s->next_pic.ptr->progress : &s->last_pic.ptr->progress;
        ff_thread_progress_await(p, mb_row);
    }

    dxy = ly*4 + lx;
    srcY = dir ? s->next_pic.data[0] : s->last_pic.data[0];
    srcU = dir ? s->next_pic.data[1] : s->last_pic.data[1];
    srcV = dir ? s->next_pic.data[2] : s->last_pic.data[2];
    src_x = s->mb_x * 16 + xoff + mx;
    src_y = s->mb_y * 16 + yoff + my;
    uvsrc_x = s->mb_x * 8 + (xoff >> 1) + umx;
    uvsrc_y = s->mb_y * 8 + (yoff >> 1) + umy;
    srcY += src_y * s->linesize + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
    /* use the edge emulation buffer when the filter taps would read
     * outside the picture */
    if(s->h_edge_pos - (width << 3) < 6 || s->v_edge_pos - (height << 3) < 6 ||
       (unsigned)(src_x - !!lx*2) > s->h_edge_pos - !!lx*2 - (width <<3) - 4 ||
       (unsigned)(src_y - !!ly*2) > s->v_edge_pos - !!ly*2 - (height<<3) - 4) {
        srcY -= 2 + 2*s->linesize;
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcY,
                                 s->linesize, s->linesize,
                                 (width << 3) + 6, (height << 3) + 6,
                                 src_x - 2, src_y - 2,
                                 s->h_edge_pos, s->v_edge_pos);
        srcY = s->sc.edge_emu_buffer + 2 + 2*s->linesize;
        emu = 1;
    }
    /* weighted prediction writes to temporary buffers blended later */
    if(!weighted){
        Y = s->dest[0] + xoff + yoff *s->linesize;
        U = s->dest[1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
        V = s->dest[2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
    }else{
        Y = r->tmp_b_block_y [dir] + xoff + yoff *s->linesize;
        U = r->tmp_b_block_uv[dir*2] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
        V = r->tmp_b_block_uv[dir*2+1] + (xoff>>1) + (yoff>>1)*s->uvlinesize;
    }

    /* 16x8/8x16 partitions are done as two 8-pel-wide/-tall halves */
    if(block_type == RV34_MB_P_16x8){
        qpel_mc[1][dxy](Y, srcY, s->linesize);
        Y += 8;
        srcY += 8;
    }else if(block_type == RV34_MB_P_8x16){
        qpel_mc[1][dxy](Y, srcY, s->linesize);
        Y += 8 * s->linesize;
        srcY += 8 * s->linesize;
    }
    is16x16 = (block_type != RV34_MB_P_8x8) && (block_type != RV34_MB_P_16x8) && (block_type != RV34_MB_P_8x16);
    qpel_mc[!is16x16][dxy](Y, srcY, s->linesize);
    if (emu) {
        uint8_t *uvbuf = s->sc.edge_emu_buffer;

        s->vdsp.emulated_edge_mc(uvbuf, srcU,
                                 s->uvlinesize, s->uvlinesize,
                                 (width << 2) + 1, (height << 2) + 1,
                                 uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        srcU = uvbuf;
        uvbuf += 9*s->uvlinesize;

        s->vdsp.emulated_edge_mc(uvbuf, srcV,
                                 s->uvlinesize, s->uvlinesize,
                                 (width << 2) + 1, (height << 2) + 1,
                                 uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        srcV = uvbuf;
    }
    chroma_mc[2-width] (U, srcU, s->uvlinesize, height*4, uvmx, uvmy);
    chroma_mc[2-width] (V, srcV, s->uvlinesize, height*4, uvmx, uvmy);
}
793 
/** Single-reference motion compensation (put, no averaging or weighting). */
static void rv34_mc_1mv(RV34DecContext *r, const int block_type,
                        const int xoff, const int yoff, int mv_off,
                        const int width, const int height, int dir)
{
    rv34_mc(r, block_type, xoff, yoff, mv_off, width, height, dir, r->rv30, 0,
            r->rdsp.put_pixels_tab,
            r->rdsp.put_chroma_pixels_tab);
}
802 
804 {
805  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][0](r->s.dest[0],
806  r->tmp_b_block_y[0],
807  r->tmp_b_block_y[1],
808  r->weight1,
809  r->weight2,
810  r->s.linesize);
811  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[1],
812  r->tmp_b_block_uv[0],
813  r->tmp_b_block_uv[2],
814  r->weight1,
815  r->weight2,
816  r->s.uvlinesize);
817  r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[2],
818  r->tmp_b_block_uv[1],
819  r->tmp_b_block_uv[3],
820  r->weight1,
821  r->weight2,
822  r->s.uvlinesize);
823 }
824 
825 static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
826 {
827  int weighted = !r->rv30 && block_type != RV34_MB_B_BIDIR && r->weight1 != 8192;
828 
829  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 0, r->rv30, weighted,
830  r->rdsp.put_pixels_tab,
831  r->rdsp.put_chroma_pixels_tab);
832  if(!weighted){
833  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 0,
834  r->rdsp.avg_pixels_tab,
835  r->rdsp.avg_chroma_pixels_tab);
836  }else{
837  rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30, 1,
838  r->rdsp.put_pixels_tab,
839  r->rdsp.put_chroma_pixels_tab);
840  rv4_weight(r);
841  }
842 }
843 
845 {
846  int i, j;
847  int weighted = !r->rv30 && r->weight1 != 8192;
848 
849  for(j = 0; j < 2; j++)
850  for(i = 0; i < 2; i++){
851  rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 0, r->rv30,
852  weighted,
853  r->rdsp.put_pixels_tab,
854  r->rdsp.put_chroma_pixels_tab);
855  rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 1, r->rv30,
856  weighted,
857  weighted ? r->rdsp.put_pixels_tab : r->rdsp.avg_pixels_tab,
858  weighted ? r->rdsp.put_chroma_pixels_tab : r->rdsp.avg_chroma_pixels_tab);
859  }
860  if(weighted)
861  rv4_weight(r);
862 }
863 
864 /** number of motion vectors in each macroblock type */
865 static const int num_mvs[RV34_MB_TYPES] = { 0, 0, 1, 4, 1, 1, 0, 0, 2, 2, 2, 1 };
866 
/**
 * Decode motion vector differences
 * and perform motion vector reconstruction and motion compensation.
 *
 * @return 0 on success, AVERROR_INVALIDDATA on corrupted MV data
 */
static int rv34_decode_mv(RV34DecContext *r, int block_type)
{
    MpegEncContext *s = &r->s;
    GetBitContext *gb = &s->gb;
    int i, j, k, l;
    int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
    int next_bt;

    /* read one signed Golomb pair per vector of this MB type */
    memset(r->dmv, 0, sizeof(r->dmv));
    for(i = 0; i < num_mvs[block_type]; i++){
        r->dmv[i][0] = get_interleaved_se_golomb(gb);
        r->dmv[i][1] = get_interleaved_se_golomb(gb);
        if (r->dmv[i][0] == INVALID_VLC ||
            r->dmv[i][1] == INVALID_VLC) {
            r->dmv[i][0] = r->dmv[i][1] = 0;
            return AVERROR_INVALIDDATA;
        }
    }
    switch(block_type){
    case RV34_MB_TYPE_INTRA:
        /* intra blocks carry zero vectors for later prediction */
        ZERO8x2(s->cur_pic.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
        return 0;
    case RV34_MB_SKIP:
        if(s->pict_type == AV_PICTURE_TYPE_P){
            ZERO8x2(s->cur_pic.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
            rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
            break;
        }
        /* fall through: skipped MBs in B-frames are handled as direct */
    case RV34_MB_B_DIRECT:
        //surprisingly, it uses motion scheme from next reference frame
        /* wait for the current mb row to be finished */
        if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
            ff_thread_progress_await(&s->next_pic.ptr->progress, FFMAX(0, s->mb_y-1));

        next_bt = s->next_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride];
        if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){
            ZERO8x2(s->cur_pic.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
            ZERO8x2(s->cur_pic.motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
        }else
            /* scale the co-located vectors by the temporal weights */
            for(j = 0; j < 2; j++)
                for(i = 0; i < 2; i++)
                    for(k = 0; k < 2; k++)
                        for(l = 0; l < 2; l++)
                            s->cur_pic.motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_pic.motion_val[0][mv_pos + i + j*s->b8_stride][k]);
        if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC
            rv34_mc_2mv(r, block_type);
        else
        ZERO8x2(s->cur_pic.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride);
        break;
    case RV34_MB_P_16x16:
    case RV34_MB_P_MIX16x16:
        rv34_pred_mv(r, block_type, 0, 0);
        rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0);
        break;
    case RV34_MB_B_FORWARD:
    case RV34_MB_B_BACKWARD:
        r->dmv[1][0] = r->dmv[0][0];
        r->dmv[1][1] = r->dmv[0][1];
        if(r->rv30)
            rv34_pred_mv_rv3(r, block_type, block_type == RV34_MB_B_BACKWARD);
        else
            rv34_pred_mv_b (r, block_type, block_type == RV34_MB_B_BACKWARD);
        rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, block_type == RV34_MB_B_BACKWARD);
        break;
    case RV34_MB_P_16x8:
    case RV34_MB_P_8x16:
        rv34_pred_mv(r, block_type, 0, 0);
        rv34_pred_mv(r, block_type, 1 + (block_type == RV34_MB_P_16x8), 1);
        if(block_type == RV34_MB_P_16x8){
            rv34_mc_1mv(r, block_type, 0, 0, 0, 2, 1, 0);
            rv34_mc_1mv(r, block_type, 0, 8, s->b8_stride, 2, 1, 0);
        }
        if(block_type == RV34_MB_P_8x16){
            rv34_mc_1mv(r, block_type, 0, 0, 0, 1, 2, 0);
            rv34_mc_1mv(r, block_type, 8, 0, 1, 1, 2, 0);
        }
        break;
    case RV34_MB_B_BIDIR:
        rv34_pred_mv_b (r, block_type, 0);
        rv34_pred_mv_b (r, block_type, 1);
        rv34_mc_2mv (r, block_type);
        break;
    case RV34_MB_P_8x8:
        for(i=0;i< 4;i++){
            rv34_pred_mv(r, block_type, i, i);
            rv34_mc_1mv (r, block_type, (i&1)<<3, (i&2)<<2, (i&1)+(i>>1)*s->b8_stride, 1, 1, 0);
        }
        break;
    }

    return 0;
}
965 /** @} */ // mv group
966 
967 /**
968  * @name Macroblock reconstruction functions
969  * @{
970  */
971 /** mapping of RV30/40 intra prediction types to standard H.264 types */
972 static const int ittrans[9] = {
975 };
976 
977 /** mapping of RV30/40 intra 16x16 prediction types to standard H.264 types */
978 static const int ittrans16[4] = {
980 };
981 
/**
 * Perform 4x4 intra prediction.
 *
 * Remaps the requested H.264-style prediction mode according to which
 * neighbours (up/left/down/right) are actually available, then calls the
 * shared h264pred function.
 */
static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
{
    /* top-right neighbour pixels used by some prediction modes */
    uint8_t *prev = dst - stride + 4;
    uint32_t topleft;

    if(!up && !left)
        itype = DC_128_PRED;
    else if(!up){
        if(itype == VERT_PRED) itype = HOR_PRED;
        if(itype == DC_PRED) itype = LEFT_DC_PRED;
    }else if(!left){
        if(itype == HOR_PRED) itype = VERT_PRED;
        if(itype == DC_PRED) itype = TOP_DC_PRED;
    }
    if(!down){
        if(itype == HOR_UP_PRED) itype = HOR_UP_PRED_RV40_NODOWN;
        if(itype == VERT_LEFT_PRED) itype = VERT_LEFT_PRED_RV40_NODOWN;
    }
    if(!right && up){
        /* replicate the last available top pixel as the top-right row */
        topleft = dst[-stride + 3] * 0x01010101u;
        prev = (uint8_t*)&topleft;
    }
    r->h.pred4x4[itype](dst, prev, stride);
}
1011 
1012 static inline int adjust_pred16(int itype, int up, int left)
1013 {
1014  if(!up && !left)
1015  itype = DC_128_PRED8x8;
1016  else if(!up){
1017  if(itype == PLANE_PRED8x8)itype = HOR_PRED8x8;
1018  if(itype == VERT_PRED8x8) itype = HOR_PRED8x8;
1019  if(itype == DC_PRED8x8) itype = LEFT_DC_PRED8x8;
1020  }else if(!left){
1021  if(itype == PLANE_PRED8x8)itype = VERT_PRED8x8;
1022  if(itype == HOR_PRED8x8) itype = VERT_PRED8x8;
1023  if(itype == DC_PRED8x8) itype = TOP_DC_PRED8x8;
1024  }
1025  return itype;
1026 }
1027 
/* NOTE(review): the first line of this definition (return type and name —
 * rv34_process_block per the call sites below) was lost in extraction;
 * restore it from the full source.
 *
 * Decode one 4x4 coefficient block and add its inverse transform to pdst.
 * fc/sc select the VLC tables for the first/subsequent coefficients,
 * q_dc/q_ac are the DC and AC dequantizer scales.
 */
                               uint8_t *pdst, int stride,
                               int fc, int sc, int q_dc, int q_ac)
{
    MpegEncContext *s = &r->s;
    int16_t *ptr = s->block[0];
    int has_ac = rv34_decode_block(ptr, &s->gb, r->cur_vlcs,
                                   fc, sc, q_dc, q_ac, q_ac);
    if(has_ac){
        // AC coefficients present: full inverse transform + add
        r->rdsp.rv34_idct_add(pdst, stride, ptr);
    }else{
        // DC-only fast path; clear the consumed DC for the next block
        r->rdsp.rv34_idct_dc_add(pdst, stride, ptr[0]);
        ptr[0] = 0;
    }
}
1043 
/**
 * Reconstruct an intra 16x16 macroblock.
 *
 * The 16 luma DC coefficients are coded as one separate 4x4 block; the
 * whole 16x16 luma area is predicted at once, then per-4x4 residuals are
 * added. Chroma planes are predicted 8x8 and reconstructed the same way.
 *
 * @param intra_types per-4x4 intra prediction types for this MB
 * @param cbp         coded block pattern (luma in low 16 bits, chroma above)
 */
static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
{
    LOCAL_ALIGNED_16(int16_t, block16, [16]);
    MpegEncContext *s = &r->s;
    GetBitContext *gb = &s->gb;
    int q_dc = rv34_qscale_tab[ r->luma_dc_quant_i[s->qscale] ],
        q_ac = rv34_qscale_tab[s->qscale];
    uint8_t *dst = s->dest[0];
    int16_t *ptr = s->block[0];
    int i, j, itype, has_ac;

    memset(block16, 0, 16 * sizeof(*block16));

    // decode the separately-coded luma DC block (table 3, sub-index 0)
    has_ac = rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac);
    if(has_ac)
        r->rdsp.rv34_inv_transform(block16);
    else
        r->rdsp.rv34_inv_transform_dc(block16);

    // predict the full 16x16 luma block from available neighbours
    itype = ittrans16[intra_types[0]];
    itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);
    r->h.pred16x16[itype](dst, s->linesize);

    for(j = 0; j < 4; j++){
        for(i = 0; i < 4; i++, cbp >>= 1){
            int dc = block16[i + j*4];  // DC for this 4x4 from the DC block

            if(cbp & 1){
                has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
            }else
                has_ac = 0;

            if(has_ac){
                ptr[0] = dc;  // merge the DC into the AC block before the IDCT
                r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
            }else
                r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
        }

        dst += 4*s->linesize;
    }

    // chroma: no plane prediction available, fall back to DC
    itype = ittrans16[intra_types[0]];
    if(itype == PLANE_PRED8x8) itype = DC_PRED8x8;
    itype = adjust_pred16(itype, r->avail_cache[6-4], r->avail_cache[6-1]);

    q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
    q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];

    for(j = 1; j < 3; j++){  // j = 1, 2: U and V planes
        dst = s->dest[j];
        r->h.pred8x8[itype](dst, s->uvlinesize);
        for(i = 0; i < 4; i++, cbp >>= 1){
            uint8_t *pdst;
            if(!(cbp & 1)) continue;
            pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;

            rv34_process_block(r, pdst, s->uvlinesize,
                               r->chroma_vlc, 1, q_dc, q_ac);
        }
    }
}
1106 
/**
 * Reconstruct an intra macroblock with per-4x4 prediction modes.
 *
 * Builds a 6x8 availability map for the sixteen luma 4x4 blocks, then
 * predicts and reconstructs each one; chroma 4x4 blocks follow using the
 * (subsampled) intra types and a small availability cache.
 */
static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
{
    MpegEncContext *s = &r->s;
    uint8_t *dst = s->dest[0];
    int avail[6*8] = {0};
    int i, j, k;
    int idx, q_ac, q_dc;

    // Set neighbour information.
    if(r->avail_cache[1])
        avail[0] = 1;
    if(r->avail_cache[2])
        avail[1] = avail[2] = 1;
    if(r->avail_cache[3])
        avail[3] = avail[4] = 1;
    if(r->avail_cache[4])
        avail[5] = 1;
    if(r->avail_cache[5])
        avail[8] = avail[16] = 1;
    if(r->avail_cache[9])
        avail[24] = avail[32] = 1;

    q_ac = rv34_qscale_tab[s->qscale];
    for(j = 0; j < 4; j++){
        idx = 9 + j*8;  // index into avail[] for the first block of this row
        for(i = 0; i < 4; i++, cbp >>= 1, dst += 4, idx++){
            // up = idx-8, left = idx-1, down-left = idx+7, up-right = idx-7
            rv34_pred_4x4_block(r, dst, s->linesize, ittrans[intra_types[i]], avail[idx-8], avail[idx-1], avail[idx+7], avail[idx-7]);
            avail[idx] = 1;  // this block is now available to its neighbours
            if(!(cbp & 1)) continue;

            rv34_process_block(r, dst, s->linesize,
                               r->luma_vlc, 0, q_ac, q_ac);
        }
        dst += s->linesize * 4 - 4*4;  // next row of 4x4 blocks
        intra_types += r->intra_types_stride;
    }

    intra_types -= r->intra_types_stride * 4;  // rewind for the chroma pass

    q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
    q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];

    for(k = 0; k < 2; k++){  // U then V
        dst = s->dest[1+k];
        fill_rectangle(r->avail_cache + 6, 2, 2, 4, 0, 4);

        for(j = 0; j < 2; j++){
            int* acache = r->avail_cache + 6 + j*4;
            for(i = 0; i < 2; i++, cbp >>= 1, acache++){
                // chroma reuses every other luma intra type (2:1 subsampling)
                int itype = ittrans[intra_types[i*2+j*2*r->intra_types_stride]];
                rv34_pred_4x4_block(r, dst+4*i, s->uvlinesize, itype, acache[-4], acache[-1], !i && !j, acache[-3]);
                acache[0] = 1;

                if(!(cbp&1)) continue;

                rv34_process_block(r, dst + 4*i, s->uvlinesize,
                                   r->chroma_vlc, 1, q_dc, q_ac);
            }

            dst += 4*s->uvlinesize;
        }
    }
}
1170 
/**
 * Check whether a motion vector differs from its neighbour by more than 3
 * (in MV units) in either component.
 *
 * @param motion_val pointer to the current MV; the neighbour is at -step
 * @param step       distance (in MV array entries) to the neighbour
 * @return 1 if the difference exceeds the deblocking threshold, 0 otherwise
 */
static int is_mv_diff_gt_3(int16_t (*motion_val)[2], int step)
{
    for (int comp = 0; comp < 2; comp++) {
        int diff = motion_val[0][comp] - motion_val[-step][comp];
        if (diff < -3 || diff > 3)
            return 1;
    }
    return 0;
}
1182 
/* NOTE(review): the signature line of this definition was lost in extraction
 * (static int rv34_set_deblock_coef(RV34DecContext *r) per the call site in
 * rv34_decode_inter_macroblock) — restore it from the full source.
 *
 * Build a bitmask of 4x4 edges in the current macroblock whose motion
 * vectors differ from their neighbour by more than 3 in either component,
 * i.e. edges the loop filter must process. Vertical-edge bits go in
 * vmvmask, horizontal-edge bits in hmvmask.
 */
{
    MpegEncContext *s = &r->s;
    int hmvmask = 0, vmvmask = 0, i, j;
    int midx = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
    int16_t (*motion_val)[2] = &s->cur_pic.motion_val[0][midx];
    for(j = 0; j < 16; j += 8){
        for(i = 0; i < 2; i++){
            if(is_mv_diff_gt_3(motion_val + i, 1))
                vmvmask |= 0x11 << (j + i*2);
            // skip the horizontal check on the very first row of the picture
            if((j || s->mb_y) && is_mv_diff_gt_3(motion_val + i, s->b8_stride))
                hmvmask |= 0x03 << (j + i*2);
        }
        motion_val += s->b8_stride;
    }
    if(s->first_slice_line)
        hmvmask &= ~0x000F;   // no neighbour above across the slice boundary
    if(!s->mb_x)
        vmvmask &= ~0x1111;   // no neighbour to the left at the picture edge
    if(r->rv30){ //RV30 marks both subblocks on the edge for filtering
        vmvmask |= (vmvmask & 0x4444) >> 1;
        hmvmask |= (hmvmask & 0x0F00) >> 4;
        // also propagate edge flags into the left/top neighbour MBs
        if(s->mb_x)
            r->deblock_coefs[s->mb_x - 1 + s->mb_y*s->mb_stride] |= (vmvmask & 0x1111) << 3;
        if(!s->first_slice_line)
            r->deblock_coefs[s->mb_x + (s->mb_y - 1)*s->mb_stride] |= (hmvmask & 0xF) << 12;
    }
    return hmvmask | vmvmask;
}
1212 
/**
 * Decode one macroblock of an inter-coded (P/B) slice.
 *
 * Fills the neighbour-availability cache, decodes the MB header (which also
 * performs motion compensation), then decodes and adds the luma and chroma
 * residuals. Intra MBs inside inter slices are dispatched to the intra
 * reconstruction functions.
 *
 * @return 0 on success, -1 on header decoding failure
 */
static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
{
    MpegEncContext *s = &r->s;
    GetBitContext *gb = &s->gb;
    uint8_t *dst = s->dest[0];
    int16_t *ptr = s->block[0];
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
    int cbp, cbp2;   // NOTE(review): cbp2 is written but never read in this visible code
    int q_dc, q_ac, has_ac;
    int i, j;
    int dist;

    // Calculate which neighbours are available. Maybe it's worth optimizing too.
    memset(r->avail_cache, 0, sizeof(r->avail_cache));
    fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
    // dist = number of MBs decoded since the last resync point; neighbours
    // from before the resync must not be used
    dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
    if(s->mb_x && dist)
        r->avail_cache[5] =
        r->avail_cache[9] = s->cur_pic.mb_type[mb_pos - 1];
    if(dist >= s->mb_width)
        r->avail_cache[2] =
        r->avail_cache[3] = s->cur_pic.mb_type[mb_pos - s->mb_stride];
    if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
        r->avail_cache[4] = s->cur_pic.mb_type[mb_pos - s->mb_stride + 1];
    if(s->mb_x && dist > s->mb_width)
        r->avail_cache[1] = s->cur_pic.mb_type[mb_pos - s->mb_stride - 1];

    s->qscale = r->si.quant;
    cbp = cbp2 = rv34_decode_inter_mb_header(r, intra_types);
    r->cbp_luma  [mb_pos] = cbp;
    r->cbp_chroma[mb_pos] = cbp >> 16;
    r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos];
    s->cur_pic.qscale_table[mb_pos] = s->qscale;

    if(cbp == -1)
        return -1;

    if (IS_INTRA(s->cur_pic.mb_type[mb_pos])) {
        if(r->is16) rv34_output_i16x16(r, intra_types, cbp);
        else        rv34_output_intra(r, intra_types, cbp);
        return 0;
    }

    if(r->is16){
        // Only for RV34_MB_P_MIX16x16
        LOCAL_ALIGNED_16(int16_t, block16, [16]);
        memset(block16, 0, 16 * sizeof(*block16));
        q_dc = rv34_qscale_tab[ r->luma_dc_quant_p[s->qscale] ];
        q_ac = rv34_qscale_tab[s->qscale];
        // luma DCs are coded as one extra 4x4 block, like in intra 16x16
        if (rv34_decode_block(block16, gb, r->cur_vlcs, 3, 0, q_dc, q_dc, q_ac))
            r->rdsp.rv34_inv_transform(block16);
        else
            r->rdsp.rv34_inv_transform_dc(block16);

        q_ac = rv34_qscale_tab[s->qscale];

        for(j = 0; j < 4; j++){
            for(i = 0; i < 4; i++, cbp >>= 1){
                int dc = block16[i + j*4];

                if(cbp & 1){
                    has_ac = rv34_decode_block(ptr, gb, r->cur_vlcs, r->luma_vlc, 0, q_ac, q_ac, q_ac);
                }else
                    has_ac = 0;

                if(has_ac){
                    ptr[0] = dc;  // merge the separately-coded DC before the IDCT
                    r->rdsp.rv34_idct_add(dst+4*i, s->linesize, ptr);
                }else
                    r->rdsp.rv34_idct_dc_add(dst+4*i, s->linesize, dc);
            }

            dst += 4*s->linesize;
        }

        r->cur_vlcs = choose_vlc_set(r->si.quant, r->si.vlc_set, 1);
    }else{
        q_ac = rv34_qscale_tab[s->qscale];

        for(j = 0; j < 4; j++){
            for(i = 0; i < 4; i++, cbp >>= 1){
                if(!(cbp & 1)) continue;

                rv34_process_block(r, dst + 4*i, s->linesize,
                                   r->luma_vlc, 0, q_ac, q_ac);
            }
            dst += 4*s->linesize;
        }
    }

    q_dc = rv34_qscale_tab[rv34_chroma_quant[1][s->qscale]];
    q_ac = rv34_qscale_tab[rv34_chroma_quant[0][s->qscale]];

    for(j = 1; j < 3; j++){  // j = 1, 2: U and V planes
        dst = s->dest[j];
        for(i = 0; i < 4; i++, cbp >>= 1){
            uint8_t *pdst;
            if(!(cbp & 1)) continue;
            pdst = dst + (i&1)*4 + (i&2)*2*s->uvlinesize;

            rv34_process_block(r, pdst, s->uvlinesize,
                               r->chroma_vlc, 1, q_dc, q_ac);
        }
    }

    return 0;
}
1320 
1321 static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
1322 {
1323  MpegEncContext *s = &r->s;
1324  int cbp, dist;
1325  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
1326 
1327  // Calculate which neighbours are available. Maybe it's worth optimizing too.
1328  memset(r->avail_cache, 0, sizeof(r->avail_cache));
1329  fill_rectangle(r->avail_cache + 6, 2, 2, 4, 1, 4);
1330  dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width;
1331  if(s->mb_x && dist)
1332  r->avail_cache[5] =
1333  r->avail_cache[9] = s->cur_pic.mb_type[mb_pos - 1];
1334  if(dist >= s->mb_width)
1335  r->avail_cache[2] =
1336  r->avail_cache[3] = s->cur_pic.mb_type[mb_pos - s->mb_stride];
1337  if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1)
1338  r->avail_cache[4] = s->cur_pic.mb_type[mb_pos - s->mb_stride + 1];
1339  if(s->mb_x && dist > s->mb_width)
1340  r->avail_cache[1] = s->cur_pic.mb_type[mb_pos - s->mb_stride - 1];
1341 
1342  s->qscale = r->si.quant;
1343  cbp = rv34_decode_intra_mb_header(r, intra_types);
1344  r->cbp_luma [mb_pos] = cbp;
1345  r->cbp_chroma[mb_pos] = cbp >> 16;
1346  r->deblock_coefs[mb_pos] = 0xFFFF;
1347  s->cur_pic.qscale_table[mb_pos] = s->qscale;
1348 
1349  if(cbp == -1)
1350  return -1;
1351 
1352  if(r->is16){
1353  rv34_output_i16x16(r, intra_types, cbp);
1354  return 0;
1355  }
1356 
1357  rv34_output_intra(r, intra_types, cbp);
1358  return 0;
1359 }
1360 
/* NOTE(review): the signature line of this definition was lost in extraction
 * (static int check_slice_end(RV34DecContext *r, MpegEncContext *s) per the
 * call site in rv34_decode_slice) — restore it from the full source.
 *
 * Return 1 when the current slice is finished: past the bottom of the
 * picture, no MBs left, or the bitstream is (effectively) exhausted.
 */
{
    int bits;
    if(s->mb_y >= s->mb_height)
        return 1;
    if(!s->mb_num_left)
        return 1;
    if(r->s.mb_skip_run > 1)
        return 0;   // more skipped MBs pending, no bits needed to continue
    bits = get_bits_left(&s->gb);
    // fewer than 8 bits left and all of them zero => trailing padding
    if(bits <= 0 || (bits < 8 && !show_bits(&s->gb, bits)))
        return 1;
    return 0;
}
1375 
1376 
/* NOTE(review): the signature line of this definition was lost in extraction
 * (a void rv34_decoder_free(RV34DecContext *r) helper, per the allocation
 * counterpart below) — restore it from the full source.
 *
 * Free all per-decoder buffers; safe to call on partially-allocated state
 * since av_freep() handles NULL and clears the pointers.
 */
{
    av_freep(&r->intra_types_hist);
    r->intra_types = NULL;   // pointed into intra_types_hist, now dangling
    av_freep(&r->tmp_b_block_base);
    av_freep(&r->mb_type);
    av_freep(&r->cbp_luma);
    av_freep(&r->cbp_chroma);
    av_freep(&r->deblock_coefs);
}
1387 
1388 
/* NOTE(review): the signature line of this definition was lost in extraction
 * (static int rv34_decoder_alloc(RV34DecContext *r) per rv34_decoder_realloc
 * below) — restore it from the full source.
 *
 * Allocate all per-decoder buffers sized from the current MB dimensions.
 * On failure, flags the context for reinitialization and returns ENOMEM.
 */
{
    // two extra columns of padding around the per-4x4 intra type map
    r->intra_types_stride = r->s.mb_width * 4 + 4;

    r->cbp_chroma       = av_mallocz(r->s.mb_stride * r->s.mb_height *
                                     sizeof(*r->cbp_chroma));
    r->cbp_luma         = av_mallocz(r->s.mb_stride * r->s.mb_height *
                                     sizeof(*r->cbp_luma));
    r->deblock_coefs    = av_mallocz(r->s.mb_stride * r->s.mb_height *
                                     sizeof(*r->deblock_coefs));
    // two rows of history (previous + current MB row of 4x4 types)
    r->intra_types_hist = av_malloc(r->intra_types_stride * 4 * 2 *
                                    sizeof(*r->intra_types_hist));
    r->mb_type          = av_mallocz(r->s.mb_stride * r->s.mb_height *
                                     sizeof(*r->mb_type));

    if (!(r->cbp_chroma && r->cbp_luma && r->deblock_coefs &&
          r->intra_types_hist && r->mb_type)) {
        r->s.context_reinit = 1;
        /* NOTE(review): a cleanup line (doxygen line 1407, presumably freeing
         * the partial allocations) was dropped in extraction — check source. */
        return AVERROR(ENOMEM);
    }

    r->intra_types = r->intra_types_hist + r->intra_types_stride * 4;

    return 0;
}
1415 
1416 
/* NOTE(review): the signature line of this definition was lost in extraction
 * (static int rv34_decoder_realloc(RV34DecContext *r) per the call sites),
 * and line 1419 — presumably a rv34_decoder_free(r) call preceding the
 * re-allocation — was dropped; restore both from the full source.
 *
 * Re-create all per-decoder buffers after a dimension change.
 */
{
    return rv34_decoder_alloc(r);
}
1422 
1423 
/**
 * Decode one slice: parse its header, validate it against the current
 * picture state, then decode macroblocks until the slice ends.
 *
 * @param end      expected index of the first MB after this slice
 * @param buf      slice payload
 * @param buf_size payload size in bytes
 * @return 1 when the last MB row of the picture has been reached,
 *         0 on a normal (partial) slice, negative on error
 */
static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int buf_size)
{
    MpegEncContext *s = &r->s;
    GetBitContext *gb = &s->gb;
    int mb_pos, slice_type;
    int res;

    init_get_bits(&r->s.gb, buf, buf_size*8);
    res = r->parse_slice_header(r, gb, &r->si);
    if(res < 0){
        av_log(s->avctx, AV_LOG_ERROR, "Incorrect or unknown slice header\n");
        return -1;
    }

    // all slices of a picture must agree on type and dimensions
    slice_type = r->si.type ? r->si.type : AV_PICTURE_TYPE_I;
    if (slice_type != s->pict_type) {
        av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n");
        return AVERROR_INVALIDDATA;
    }
    if (s->width != r->si.width || s->height != r->si.height) {
        av_log(s->avctx, AV_LOG_ERROR, "Size mismatch\n");
        return AVERROR_INVALIDDATA;
    }

    r->si.end = end;
    s->qscale = r->si.quant;
    s->mb_num_left = r->si.end - r->si.start;
    r->s.mb_skip_run = 0;

    // resync to the slice's declared start MB if we are out of step
    mb_pos = s->mb_x + s->mb_y * s->mb_width;
    if(r->si.start != mb_pos){
        av_log(s->avctx, AV_LOG_ERROR, "Slice indicates MB offset %d, got %d\n", r->si.start, mb_pos);
        s->mb_x = r->si.start % s->mb_width;
        s->mb_y = r->si.start / s->mb_width;
    }
    // reset intra type history: neighbours across a resync are unavailable
    memset(r->intra_types_hist, -1, r->intra_types_stride * 4 * 2 * sizeof(*r->intra_types_hist));
    s->first_slice_line = 1;
    s->resync_mb_x = s->mb_x;
    s->resync_mb_y = s->mb_y;

    /* NOTE(review): a line (doxygen line 1464) was dropped here in
     * extraction — check the full source. */
    while(!check_slice_end(r, s)) {
        ff_update_block_index(s, 8, 0, 1);

        if(r->si.type)
            res = rv34_decode_inter_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
        else
            res = rv34_decode_intra_macroblock(r, r->intra_types + s->mb_x * 4 + 4);
        if(res < 0){
            ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_ERROR);
            return -1;
        }
        if (++s->mb_x == s->mb_width) {
            s->mb_x = 0;
            s->mb_y++;
            /* NOTE(review): a line (doxygen line 1479) was dropped here in
             * extraction — check the full source. */

            // shift the current row of intra types into the history row
            memmove(r->intra_types_hist, r->intra_types, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));
            memset(r->intra_types, -1, r->intra_types_stride * 4 * sizeof(*r->intra_types_hist));

            // the loop filter lags two MB rows behind reconstruction
            if(r->loop_filter && s->mb_y >= 2)
                r->loop_filter(r, s->mb_y - 2);

            if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
                ff_thread_progress_report(&s->cur_pic.ptr->progress,
                                          s->mb_y - 2);

        }
        if(s->mb_x == s->resync_mb_x)
            s->first_slice_line=0;
        s->mb_num_left--;
    }
    ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);

    return s->mb_y == s->mb_height;
}
1500 
1501 /** @} */ // reconstruction group end
1502 
1503 /**
1504  * Initialize decoder.
1505  */
/* NOTE(review): the signature line of this definition was lost in extraction
 * (av_cold int ff_rv34_decode_init(AVCodecContext *avctx) per the doc index)
 * — restore it from the full source.
 *
 * Initialize the common RV30/40 decoder state: mpegvideo context, pixel
 * format, H.264-style intra predictors, per-decoder buffers and the
 * one-time static tables.
 */
{
    static AVOnce init_static_once = AV_ONCE_INIT;
    RV34DecContext *r = avctx->priv_data;
    MpegEncContext *s = &r->s;
    int ret;

    ret = ff_mpv_decode_init(s, avctx);
    if (ret < 0)
        return ret;
    s->out_format = FMT_H263;

    avctx->pix_fmt = AV_PIX_FMT_YUV420P;
    avctx->has_b_frames = 1;
    s->low_delay = 0;   // output is delayed by B-frame reordering

    if ((ret = ff_mpv_common_init(s)) < 0)
        return ret;

    // RV30/40 reuse the H.264 spatial intra predictors
    ff_h264_pred_init(&r->h, AV_CODEC_ID_RV40, 8, 1);

    /* NOTE(review): a line (doxygen line 1527, presumably
     * ret = rv34_decoder_alloc(r);) was dropped here — check source. */
    if (ret < 0)
        return ret;

    // VLC tables are shared between all decoder instances
    ff_thread_once(&init_static_once, rv34_init_tables);

    return 0;
}
1535 
/* NOTE(review): the signature line of this definition was lost in extraction
 * (int ff_rv34_decode_update_thread_context(AVCodecContext *dst,
 * const AVCodecContext *src) per the doc index) — restore from full source.
 *
 * Frame-threading: copy decoding state from the source thread's context
 * into this one, resizing buffers first if dimensions changed.
 */
{
    RV34DecContext *r = dst->priv_data, *r1 = src->priv_data;
    MpegEncContext * const s = &r->s, * const s1 = &r1->s;
    int err;

    if (dst == src || !s1->context_initialized)
        return 0;

    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        s->height = s1->height;
        s->width  = s1->width;
        if ((err = ff_mpv_common_frame_size_change(s)) < 0)
            return err;
        if ((err = rv34_decoder_realloc(r)) < 0)
            return err;
    }

    // timestamps drive B-frame weighting, so they must follow the source
    r->cur_pts  = r1->cur_pts;
    r->last_pts = r1->last_pts;
    r->next_pts = r1->next_pts;

    memset(&r->si, 0, sizeof(r->si));

    // Do no call ff_mpeg_update_thread_context on a partially initialized
    // decoder context.
    // NOTE(review): this condition was already checked above and is
    // unreachable here — harmless, but worth confirming upstream intent.
    if (!s1->context_initialized)
        return 0;

    return ff_mpeg_update_thread_context(dst, src);
}
1567 
1568 static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, int slice_count, int buf_size)
1569 {
1570  if (n < slice_count) {
1571  return AV_RL32(buf + n*8 - 4) == 1 ? AV_RL32(buf + n*8) : AV_RB32(buf + n*8);
1572  } else
1573  return buf_size;
1574 }
1575 
/**
 * Finalize the current picture and, if possible, hand a frame to the caller.
 *
 * B-frames are output immediately; otherwise the previous reference frame
 * is output (classic one-frame reordering delay).
 *
 * @return 1 if a picture was written into pict, 0 if none is available yet,
 *         negative error code on failure
 */
static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
{
    RV34DecContext *r = avctx->priv_data;
    MpegEncContext *s = &r->s;
    int got_picture = 0, ret;

    ff_er_frame_end(&s->er, NULL);
    /* NOTE(review): a line (doxygen line 1583, likely ff_mpv_frame_end(s);)
     * was dropped here in extraction — check the full source. */
    s->mb_num_left = 0;

    // unblock threads that wait on this picture's progress
    if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
        ff_thread_progress_report(&s->cur_pic.ptr->progress, INT_MAX);

    if (s->pict_type == AV_PICTURE_TYPE_B) {
        if ((ret = av_frame_ref(pict, s->cur_pic.ptr->f)) < 0)
            return ret;
        ff_print_debug_info(s, s->cur_pic.ptr, pict);
        ff_mpv_export_qp_table(s, pict, s->cur_pic.ptr, FF_MPV_QSCALE_TYPE_MPEG1);
        got_picture = 1;
    } else if (s->last_pic.ptr) {
        if ((ret = av_frame_ref(pict, s->last_pic.ptr->f)) < 0)
            return ret;
        ff_print_debug_info(s, s->last_pic.ptr, pict);
        ff_mpv_export_qp_table(s, pict, s->last_pic.ptr, FF_MPV_QSCALE_TYPE_MPEG1);
        got_picture = 1;
    }

    return got_picture;
}
1605 
1606 static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
1607 {
1608  // attempt to keep aspect during typical resolution switches
1609  if (!sar.num)
1610  sar = (AVRational){1, 1};
1611 
1612  sar = av_mul_q(sar, av_mul_q((AVRational){new_h, new_w}, (AVRational){old_w, old_h}));
1613  return sar;
1614 }
1615 
/* NOTE(review): the first line of this signature (return type, name and the
 * first parameters — int ff_rv34_decode_frame(AVCodecContext *avctx,
 * AVFrame *pict, ... per the doc index) was lost in extraction; restore it
 * from the full source.
 *
 * Decode one packet: parse the slice offset table, validate the first
 * slice header, start a new picture on slice 0, decode every slice and
 * finally output a frame when the picture is complete.
 */
                                 int *got_picture_ptr, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    RV34DecContext *r = avctx->priv_data;
    MpegEncContext *s = &r->s;
    SliceInfo si;
    int i, ret;
    int slice_count;
    const uint8_t *slices_hdr = NULL;
    int last = 0;
    int faulty_b = 0;
    int offset;

    /* no supplementary picture */
    if (buf_size == 0) {
        /* special case for last picture: flush the delayed reference */
        if (s->next_pic.ptr) {
            if ((ret = av_frame_ref(pict, s->next_pic.ptr->f)) < 0)
                return ret;
            ff_mpv_unref_picture(&s->next_pic);

            *got_picture_ptr = 1;
        }
        return 0;
    }

    // packet layout: 1-byte (count-1), then slice_count 8-byte table
    // entries, then the slice data itself
    slice_count = (*buf++) + 1;
    slices_hdr = buf + 4;
    buf += 8 * slice_count;
    buf_size -= 1 + 8 * slice_count;

    offset = get_slice_offset(avctx, slices_hdr, 0, slice_count, buf_size);
    //parse first slice header to check whether this frame can be decoded
    if(offset < 0 || offset > buf_size){
        av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
        return AVERROR_INVALIDDATA;
    }
    init_get_bits(&s->gb, buf+offset, (buf_size-offset)*8);
    if(r->parse_slice_header(r, &r->s.gb, &si) < 0 || si.start){
        av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
        return AVERROR_INVALIDDATA;
    }
    if (!s->last_pic.ptr && si.type == AV_PICTURE_TYPE_B) {
        av_log(avctx, AV_LOG_ERROR, "Invalid decoder state: B-frame without "
               "reference data.\n");
        faulty_b = 1;   // error out later, after possible reinit handling
    }
    if(   (avctx->skip_frame >= AVDISCARD_NONREF && si.type==AV_PICTURE_TYPE_B)
       || (avctx->skip_frame >= AVDISCARD_NONKEY && si.type!=AV_PICTURE_TYPE_I)
       ||  avctx->skip_frame >= AVDISCARD_ALL)
        return avpkt->size;

    /* first slice */
    if (si.start == 0) {
        // a previous picture was left unfinished: close it out first
        if (s->mb_num_left > 0 && s->cur_pic.ptr) {
            av_log(avctx, AV_LOG_ERROR, "New frame but still %d MB left.\n",
                   s->mb_num_left);
            if (!s->context_reinit)
                ff_er_frame_end(&s->er, NULL);
            /* NOTE(review): a line (doxygen line 1677) was dropped here in
             * extraction — check the full source. */
        }

        if (s->width != si.width || s->height != si.height || s->context_reinit) {
            int err;

            av_log(s->avctx, AV_LOG_WARNING, "Changing dimensions to %dx%d\n",
                   si.width, si.height);

            if (av_image_check_size(si.width, si.height, 0, s->avctx))
                return AVERROR_INVALIDDATA;

            s->avctx->sample_aspect_ratio = update_sar(
                s->width, s->height, s->avctx->sample_aspect_ratio,
                si.width, si.height);
            s->width  = si.width;
            s->height = si.height;

            err = ff_set_dimensions(s->avctx, s->width, s->height);
            if (err < 0)
                return err;
            if ((err = ff_mpv_common_frame_size_change(s)) < 0)
                return err;
            if ((err = rv34_decoder_realloc(r)) < 0)
                return err;
        }
        if (faulty_b)
            return AVERROR_INVALIDDATA;
        s->pict_type = si.type ? si.type : AV_PICTURE_TYPE_I;
        if (ff_mpv_frame_start(s, s->avctx) < 0)
            return -1;
        /* NOTE(review): a line (doxygen line 1708) was dropped here in
         * extraction — check the full source. */
        if (!r->tmp_b_block_base) {
            int i;

            // scratch area for B-frame bidirectional averaging:
            // 2 luma 16-row strips followed by 4 chroma 8-row strips
            r->tmp_b_block_base = av_malloc(s->linesize * 48);
            if (!r->tmp_b_block_base)
                return AVERROR(ENOMEM);
            for (i = 0; i < 2; i++)
                r->tmp_b_block_y[i] = r->tmp_b_block_base
                                      + i * 16 * s->linesize;
            for (i = 0; i < 4; i++)
                r->tmp_b_block_uv[i] = r->tmp_b_block_base + 32 * s->linesize
                                       + (i >> 1) * 8 * s->uvlinesize
                                       + (i & 1) * 16;
        }
        r->cur_pts = si.pts;
        if (s->pict_type != AV_PICTURE_TYPE_B) {
            r->last_pts = r->next_pts;
            r->next_pts = r->cur_pts;
        } else {
            // derive B-frame prediction weights from timestamp distances
            int refdist = GET_PTS_DIFF(r->next_pts, r->last_pts);
            int dist0   = GET_PTS_DIFF(r->cur_pts,  r->last_pts);
            int dist1   = GET_PTS_DIFF(r->next_pts, r->cur_pts);

            if(!refdist){
                // degenerate timestamps: fall back to equal weighting
                r->mv_weight1 = r->mv_weight2 = r->weight1 = r->weight2 = 8192;
                r->scaled_weight = 0;
            }else{
                if (FFMAX(dist0, dist1) > refdist)
                    av_log(avctx, AV_LOG_TRACE, "distance overflow\n");

                r->mv_weight1 = (dist0 << 14) / refdist;
                r->mv_weight2 = (dist1 << 14) / refdist;
                if((r->mv_weight1|r->mv_weight2) & 511){
                    r->weight1 = r->mv_weight1;
                    r->weight2 = r->mv_weight2;
                    r->scaled_weight = 0;
                }else{
                    r->weight1 = r->mv_weight1 >> 9;
                    r->weight2 = r->mv_weight2 >> 9;
                    r->scaled_weight = 1;
                }
            }
        }
        s->mb_x = s->mb_y = 0;
        ff_thread_finish_setup(s->avctx);
    } else if (s->context_reinit) {
        av_log(s->avctx, AV_LOG_ERROR, "Decoder needs full frames to "
               "reinitialize (start MB is %d).\n", si.start);
        return AVERROR_INVALIDDATA;
    } else if (HAVE_THREADS &&
               (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
        av_log(s->avctx, AV_LOG_ERROR, "Decoder needs full frames in frame "
               "multithreading mode (start MB is %d).\n", si.start);
        return AVERROR_INVALIDDATA;
    }

    for(i = 0; i < slice_count; i++){
        int offset  = get_slice_offset(avctx, slices_hdr, i  , slice_count, buf_size);
        int offset1 = get_slice_offset(avctx, slices_hdr, i+1, slice_count, buf_size);
        int size;

        if(offset < 0 || offset > offset1 || offset1 > buf_size){
            av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
            break;
        }
        size = offset1 - offset;

        r->si.end = s->mb_width * s->mb_height;
        s->mb_num_left = r->s.mb_x + r->s.mb_y*r->s.mb_width - r->si.start;

        if(i+1 < slice_count){
            // peek at the next slice header to learn where this one ends;
            // if it is broken, extend this slice over the next one's data
            int offset2 = get_slice_offset(avctx, slices_hdr, i+2, slice_count, buf_size);
            if (offset2 < offset1 || offset2 > buf_size) {
                av_log(avctx, AV_LOG_ERROR, "Slice offset is invalid\n");
                break;
            }
            init_get_bits(&s->gb, buf+offset1, (buf_size-offset1)*8);
            if(r->parse_slice_header(r, &r->s.gb, &si) < 0){
                size = offset2 - offset;
            }else
                r->si.end = si.start;
        }
        av_assert0 (size >= 0 && size <= buf_size - offset);
        last = rv34_decode_slice(r, r->si.end, buf + offset, size);
        if(last)
            break;
    }

    if (s->cur_pic.ptr) {
        if (last) {
            if(r->loop_filter)
                r->loop_filter(r, s->mb_height - 1);

            ret = finish_frame(avctx, pict);
            if (ret < 0)
                return ret;
            *got_picture_ptr = ret;
        } else if (HAVE_THREADS &&
                   (s->avctx->active_thread_type & FF_THREAD_FRAME)) {
            av_log(avctx, AV_LOG_INFO, "marking unfished frame as finished\n");
            /* always mark the current frame as finished, frame-mt supports
             * only complete frames */
            ff_er_frame_end(&s->er, NULL);
            /* NOTE(review): a line (doxygen line 1812) was dropped here in
             * extraction — check the full source. */
            s->mb_num_left = 0;
            ff_thread_progress_report(&s->cur_pic.ptr->progress, INT_MAX);
            return AVERROR_INVALIDDATA;
        }
    }

    return avpkt->size;
}
1821 
/* NOTE(review): the signature line of this definition was lost in extraction
 * (av_cold int ff_rv34_decode_end(AVCodecContext *avctx) per the doc index),
 * and line 1826 — presumably a rv34_decoder_free(r) call — was dropped;
 * restore both from the full source.
 *
 * Close the decoder: release per-decoder buffers, then the shared
 * mpegvideo decoder state.
 */
{
    RV34DecContext *r = avctx->priv_data;


    return ff_mpv_decode_close(avctx);
}
RV34DecContext
decoder context
Definition: rv34.h:86
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:690
A
#define A(x)
Definition: vpx_arith.h:28
IS_8X8
#define IS_8X8(a)
Definition: mpegutils.h:84
rv34_mb_type_to_lavc
static const int rv34_mb_type_to_lavc[12]
translation of RV30/40 macroblock types to lavc ones
Definition: rv34.c:59
HOR_PRED8x8
#define HOR_PRED8x8
Definition: h264pred.h:69
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
rv34_qscale_tab
static const uint16_t rv34_qscale_tab[32]
This table is used for dequantizing.
Definition: rv34data.h:84
ff_thread_progress_report
void ff_thread_progress_report(ThreadProgress *pro, int n)
This function is a no-op in no-op mode; otherwise it notifies other threads that a certain level of p...
Definition: threadprogress.c:53
rv34_output_intra
static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
Definition: rv34.c:1107
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:695
r
const char * r
Definition: vf_curves.c:127
ff_rv34_decode_end
av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
Definition: rv34.c:1822
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
DC_PRED8x8
#define DC_PRED8x8
Definition: h264pred.h:68
threadprogress.h
rv34_pred_mv_rv3
static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
motion vector prediction - RV3 version
Definition: rv34.c:613
mem_internal.h
DC_128_PRED
@ DC_128_PRED
Definition: vp9.h:58
ThreadProgress
ThreadProgress is an API to easily notify other threads about progress of any kind as long as it can ...
Definition: threadprogress.h:43
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:251
thread.h
rv34_table_inter_secondpat
static const uint8_t rv34_table_inter_secondpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:3737
ittrans16
static const int ittrans16[4]
mapping of RV30/40 intra 16x16 prediction types to standard H.264 types
Definition: rv34.c:978
num_mvs
static const int num_mvs[RV34_MB_TYPES]
number of motion vectors in each macroblock type
Definition: rv34.c:865
MB_TYPE_16x8
#define MB_TYPE_16x8
Definition: mpegutils.h:43
chroma_coeffs
static const int chroma_coeffs[3]
Definition: rv34.c:659
ff_rv34_get_start_offset
int ff_rv34_get_start_offset(GetBitContext *gb, int mb_size)
Decode starting slice position.
Definition: rv34.c:339
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
ff_rv34_decode_update_thread_context
int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: rv34.c:1536
AVPacket::data
uint8_t * data
Definition: packet.h:533
DC_PRED
@ DC_PRED
Definition: vp9.h:48
table
static const uint16_t table[]
Definition: prosumer.c:203
rv34_decoder_realloc
static int rv34_decoder_realloc(RV34DecContext *r)
Definition: rv34.c:1417
VERT_LEFT_PRED
@ VERT_LEFT_PRED
Definition: vp9.h:53
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:42
check_slice_end
static int check_slice_end(RV34DecContext *r, MpegEncContext *s)
Definition: rv34.c:1361
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:472
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:826
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:823
chroma_mc
#define chroma_mc(a)
Definition: vc1dsp.c:786
mpegvideo.h
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
rv34_set_deblock_coef
static int rv34_set_deblock_coef(RV34DecContext *r)
Definition: rv34.c:1183
mpegutils.h
MB_TYPE_INTRA16x16
#define MB_TYPE_INTRA16x16
Definition: mpegutils.h:40
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:94
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:514
thread.h
MPVWorkPicture::mb_type
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:105
avail_indexes
static const uint8_t avail_indexes[4]
availability index for subblocks
Definition: rv34.c:467
MPVWorkPicture::motion_val
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:103
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
golomb.h
exp golomb vlc stuff
NUM_INTRA_TABLES
#define NUM_INTRA_TABLES
Definition: rv34vlc.h:32
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
adjust_pred16
static int adjust_pred16(int itype, int up, int left)
Definition: rv34.c:1012
RV34_MB_B_FORWARD
@ RV34_MB_B_FORWARD
B-frame macroblock, forward prediction.
Definition: rv34.h:49
rv34_decoder_alloc
static int rv34_decoder_alloc(RV34DecContext *r)
Definition: rv34.c:1389
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1820
VERT_PRED
@ VERT_PRED
Definition: vp9.h:46
rv34_pred_mv
static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
motion vector prediction
Definition: rv34.c:476
GetBitContext
Definition: get_bits.h:108
RV34VLC::first_pattern
const VLCElem * first_pattern[4]
VLCs used for decoding coefficients in the first subblock.
Definition: rv34.h:68
DIAG_DOWN_RIGHT_PRED
@ DIAG_DOWN_RIGHT_PRED
Definition: vp9.h:50
rv34_decode_block
static int rv34_decode_block(int16_t *dst, GetBitContext *gb, const RV34VLC *rvlc, int fc, int sc, int q_dc, int q_ac1, int q_ac2)
Decode coefficients for 4x4 block.
Definition: rv34.c:295
RV34_MB_B_DIRECT
@ RV34_MB_B_DIRECT
Bidirectionally predicted B-frame macroblock, no motion vectors.
Definition: rv34.h:52
val
static double val(void *priv, double ch)
Definition: aeval.c:78
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
rv34_count_ones
static const uint8_t rv34_count_ones[16]
number of ones in nibble minus one
Definition: rv34data.h:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
rv34_table_intra_firstpat
static const uint8_t rv34_table_intra_firstpat[NUM_INTRA_TABLES][4][FIRSTBLK_VLC_SIZE]
Definition: rv34vlc.h:940
rv34data.h
quant
static const uint8_t quant[64]
Definition: vmixdec.c:71
C
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
Definition: writing_filters.txt:58
avassert.h
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
mpegvideodec.h
AV_LOG_TRACE
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:206
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
HOR_PRED
@ HOR_PRED
Definition: vp9.h:47
av_cold
#define av_cold
Definition: attributes.h:90
ff_rv34_decode_init
av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
Initialize decoder.
Definition: rv34.c:1506
rv34_pred_4x4_block
static void rv34_pred_4x4_block(RV34DecContext *r, uint8_t *dst, int stride, int itype, int up, int left, int down, int right)
Perform 4x4 intra prediction.
Definition: rv34.c:985
rv34_decode_intra_macroblock
static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
Definition: rv34.c:1321
ZERO8x2
static void ZERO8x2(void *dst, int stride)
Definition: rv34.c:52
mask
static const uint16_t mask[17]
Definition: lzw.c:38
RV34VLC
VLC tables used by the decoder.
Definition: rv34.h:65
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:723
ff_er_frame_end
void ff_er_frame_end(ERContext *s, int *decode_error_flags)
Indicate that a frame has finished decoding and perform error concealment in case it has been enabled...
Definition: error_resilience.c:896
ff_mpv_common_frame_size_change
int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo_dec.c:172
width
#define width
rv34_mc_1mv
static void rv34_mc_1mv(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir)
Definition: rv34.c:794
rv34_decode_inter_macroblock
static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
Definition: rv34.c:1213
intra_vlcs
static RV34VLC intra_vlcs[NUM_INTRA_TABLES]
Definition: rv34.c:75
s
#define s(width, name)
Definition: cbs_vp9.c:198
IS_16X8
#define IS_16X8(a)
Definition: mpegutils.h:82
VERT_LEFT_PRED_RV40_NODOWN
#define VERT_LEFT_PRED_RV40_NODOWN
Definition: h264pred.h:56
RV34VLC::cbp
VLC cbp[2][4]
VLCs used for coded block patterns decoding.
Definition: rv34.h:67
CBPPAT_VLC_SIZE
#define CBPPAT_VLC_SIZE
Definition: rv34vlc.h:35
ff_mpeg_er_frame_start
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:49
calc_add_mv
static int calc_add_mv(RV34DecContext *r, int dir, int val)
Calculate motion vector component that should be added for direct blocks.
Definition: rv34.c:531
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:150
LEFT_DC_PRED
@ LEFT_DC_PRED
Definition: vp9.h:56
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
B
#define B
Definition: huffyuv.h:42
decode.h
IS_SKIP
#define IS_SKIP(a)
Definition: mpegutils.h:76
CBP_VLC_SIZE
#define CBP_VLC_SIZE
Definition: rv34vlc.h:36
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
finish_frame
static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
Definition: rv34.c:1576
rv34_mb_max_sizes
static const uint16_t rv34_mb_max_sizes[6]
maximum number of macroblocks for each of the possible slice offset sizes
Definition: rv34data.h:106
decode_coeff
static void decode_coeff(int16_t *dst, int coef, int esc, GetBitContext *gb, const VLCElem *vlc, int q)
Get one coefficient value from the bitstream and store it.
Definition: rv34.c:225
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:65
MB_TYPE_8x16
#define MB_TYPE_8x16
Definition: mpegutils.h:44
TOP_DC_PRED8x8
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
RV34VLC::second_pattern
const VLCElem * second_pattern[2]
VLCs used for decoding coefficients in the subblocks 2 and 3.
Definition: rv34.h:69
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:220
rv34_inter_coeff
static const uint8_t rv34_inter_coeff[NUM_INTER_TABLES][COEFF_VLC_SIZE]
Definition: rv34vlc.h:4024
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
ff_thread_progress_await
void ff_thread_progress_await(const ThreadProgress *pro_c, int n)
This function is a no-op in no-op mode; otherwise it waits until other threads have reached a certain...
Definition: threadprogress.c:65
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const MPVPicture *p, int qp_type)
Definition: mpegvideo_dec.c:410
RV34VLC::cbppattern
const VLCElem * cbppattern[2]
VLCs used for pattern of coded block patterns decoding.
Definition: rv34.h:66
NULL
#define NULL
Definition: coverity.c:32
GET_PTS_DIFF
#define GET_PTS_DIFF(a, b)
Definition: rv34.c:526
rv34_decode_slice
static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t *buf, int buf_size)
Definition: rv34.c:1424
rv34_init_tables
static av_cold void rv34_init_tables(void)
Initialize all tables.
Definition: rv34.c:137
RV34_MB_SKIP
@ RV34_MB_SKIP
Skipped block.
Definition: rv34.h:51
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
decode_subblock
static void decode_subblock(int16_t *dst, int code, const int is_block2, GetBitContext *gb, const VLCElem *vlc, int q)
Decode 2x2 subblock of coefficients.
Definition: rv34.c:246
COEFF_VLC_SIZE
#define COEFF_VLC_SIZE
Definition: rv34vlc.h:39
rv34_table_intra_cbppat
static const uint8_t rv34_table_intra_cbppat[NUM_INTRA_TABLES][2][CBPPAT_VLC_SIZE]
Definition: rv34vlc.h:42
RV34VLC::third_pattern
const VLCElem * third_pattern[2]
VLCs used for decoding coefficients in the last subblock.
Definition: rv34.h:70
MB_TYPE_8x8
#define MB_TYPE_8x8
Definition: mpegutils.h:45
SliceInfo::type
int type
slice type (intra, inter)
Definition: rv34.h:76
ER_MB_ERROR
#define ER_MB_ERROR
Definition: error_resilience.h:37
decode_subblock3
static void decode_subblock3(int16_t *dst, int code, GetBitContext *gb, const VLCElem *vlc, int q_dc, int q_ac1, int q_ac2)
Definition: rv34.c:272
V
#define V
Definition: avdct.c:31
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
mathops.h
VERT_PRED8x8
#define VERT_PRED8x8
Definition: h264pred.h:70
MB_TYPE_BIDIR_MV
#define MB_TYPE_BIDIR_MV
Definition: mpegutils.h:52
qpeldsp.h
rv34_gen_vlc_ext
static av_cold void rv34_gen_vlc_ext(const uint8_t *bits, int size, VLC *vlc, const uint8_t *syms, int *offset)
Generate VLC from codeword lengths.
Definition: rv34.c:94
rv34_table_intra_secondpat
static const uint8_t rv34_table_intra_secondpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:2074
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:652
MAX_VLC_SIZE
#define MAX_VLC_SIZE
Definition: rv34vlc.h:40
rv34.h
FF_MPV_QSCALE_TYPE_MPEG1
#define FF_MPV_QSCALE_TYPE_MPEG1
Definition: mpegvideodec.h:40
AVOnce
#define AVOnce
Definition: thread.h:202
rv34_decode_mv
static int rv34_decode_mv(RV34DecContext *r, int block_type)
Decode motion vector differences and perform motion vector reconstruction and motion compensation.
Definition: rv34.c:871
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
RV34_MB_P_8x8
@ RV34_MB_P_8x8
P-frame macroblock, 8x8 motion compensation partitions.
Definition: rv34.h:48
rv34_table_intra_thirdpat
static const uint8_t rv34_table_intra_thirdpat[NUM_INTRA_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:2177
VLC::table_allocated
int table_allocated
Definition: vlc.h:39
rv34_mc_2mv_skip
static void rv34_mc_2mv_skip(RV34DecContext *r)
Definition: rv34.c:844
IS_INTRA
#define IS_INTRA(x, y)
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:219
rv34_cbp_code
static const uint8_t rv34_cbp_code[16]
values used to reconstruct coded block pattern
Definition: rv34data.h:42
is_mv_diff_gt_3
static int is_mv_diff_gt_3(int16_t(*motion_val)[2], int step)
Definition: rv34.c:1171
AVPacket::size
int size
Definition: packet.h:534
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:384
RV34_MB_B_BACKWARD
@ RV34_MB_B_BACKWARD
B-frame macroblock, backward prediction.
Definition: rv34.h:50
ff_rv34_decode_frame
int ff_rv34_decode_frame(AVCodecContext *avctx, AVFrame *pict, int *got_picture_ptr, AVPacket *avpkt)
Definition: rv34.c:1616
rectangle.h
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:609
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
update_sar
static AVRational update_sar(int old_w, int old_h, AVRational sar, int new_w, int new_h)
Definition: rv34.c:1606
FIRSTBLK_VLC_SIZE
#define FIRSTBLK_VLC_SIZE
Definition: rv34vlc.h:37
get_interleaved_se_golomb
static int get_interleaved_se_golomb(GetBitContext *gb)
Definition: golomb.h:301
RV34_MB_P_8x16
@ RV34_MB_P_8x16
P-frame macroblock, 8x16 motion compensation partitions.
Definition: rv34.h:54
size
int size
Definition: twinvq_data.h:10344
VERT_RIGHT_PRED
@ VERT_RIGHT_PRED
Definition: vp9.h:51
VLCElem
Definition: vlc.h:32
decode_subblock1
static void decode_subblock1(int16_t *dst, int code, GetBitContext *gb, const VLCElem *vlc, int q)
Decode a single coefficient.
Definition: rv34.c:265
rv34_decode_cbp
static int rv34_decode_cbp(GetBitContext *gb, const RV34VLC *vlc, int table)
Decode coded block pattern.
Definition: rv34.c:192
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
DC_128_PRED8x8
#define DC_128_PRED8x8
Definition: h264pred.h:76
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:62
rv34_inter_cbppat
static const uint8_t rv34_inter_cbppat[NUM_INTER_TABLES][CBPPAT_VLC_SIZE]
Definition: rv34vlc.h:2305
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:344
height
#define height
SliceInfo::pts
int pts
frame timestamp
Definition: rv34.h:82
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
OTHERBLK_VLC_SIZE
#define OTHERBLK_VLC_SIZE
Definition: rv34vlc.h:38
IS_INTRA16x16
#define IS_INTRA16x16(a)
Definition: mpegutils.h:71
ff_vlc_init_sparse
int ff_vlc_init_sparse(VLC *vlc, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Build VLC decoding tables suitable for use with get_vlc2().
Definition: vlc.c:250
PLANE_PRED8x8
#define PLANE_PRED8x8
Definition: h264pred.h:71
Y
#define Y
Definition: boxblur.h:37
rv34_output_i16x16
static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
Definition: rv34.c:1044
RV34_MB_TYPE_INTRA16x16
@ RV34_MB_TYPE_INTRA16x16
Intra macroblock with DCs in a separate 4x4 block.
Definition: rv34.h:46
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
rv34_pred_mv_b
static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
motion vector prediction for B-frames
Definition: rv34.c:561
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1594
rv34_table_inter_thirdpat
static const uint8_t rv34_table_inter_thirdpat[NUM_INTER_TABLES][2][OTHERBLK_VLC_SIZE]
Definition: rv34vlc.h:3880
DIAG_DOWN_LEFT_PRED_RV40_NODOWN
#define DIAG_DOWN_LEFT_PRED_RV40_NODOWN
Definition: h264pred.h:54
SliceInfo::height
int height
coded height
Definition: rv34.h:81
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const MPVPicture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:403
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
AV_CODEC_ID_RV40
@ AV_CODEC_ID_RV40
Definition: codec_id.h:121
part_sizes_h
static const uint8_t part_sizes_h[RV34_MB_TYPES]
macroblock partition height in 8x8 blocks
Definition: rv34.c:464
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:371
rv34_table_inter_firstpat
static const uint8_t rv34_table_inter_firstpat[NUM_INTER_TABLES][2][FIRSTBLK_VLC_SIZE]
Definition: rv34vlc.h:2936
internal.h
HOR_UP_PRED_RV40_NODOWN
#define HOR_UP_PRED_RV40_NODOWN
Definition: h264pred.h:55
rv34_mc_2mv
static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
Definition: rv34.c:825
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
rv34_table_intra_cbp
static const uint8_t rv34_table_intra_cbp[NUM_INTRA_TABLES][8][CBP_VLC_SIZE]
Definition: rv34vlc.h:886
MB_TYPE_BACKWARD_MV
#define MB_TYPE_BACKWARD_MV
Definition: mpegutils.h:51
RV34_MB_TYPE_INTRA
@ RV34_MB_TYPE_INTRA
Intra macroblock.
Definition: rv34.h:45
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
SUINT
#define SUINT
Definition: dct32_template.c:30
RV34_MB_TYPES
@ RV34_MB_TYPES
Definition: rv34.h:57
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
table_data
static VLCElem table_data[117592]
Definition: rv34.c:84
rv34_quant_to_vlc_set
static const uint8_t rv34_quant_to_vlc_set[2][32]
tables used to translate a quantizer value into a VLC set for decoding The first table is used for in...
Definition: rv34data.h:95
SliceInfo
essential slice information
Definition: rv34.h:75
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
get_slice_offset
static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, int slice_count, int buf_size)
Definition: rv34.c:1568
mod
static int mod(int a, int b)
Modulo operation with only positive remainders.
Definition: vf_v360.c:752
LEFT_DC_PRED8x8
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
VLC::bits
int bits
Definition: vlc.h:37
mid_pred
#define mid_pred
Definition: mathops.h:96
ret
ret
Definition: filter_design.txt:187
INVALID_VLC
#define INVALID_VLC
Definition: golomb.h:37
RV34VLC::coefficient
const VLCElem * coefficient
VLCs used for decoding big coefficients.
Definition: rv34.h:71
rv4_weight
static void rv4_weight(RV34DecContext *r)
Definition: rv34.c:803
ff_mpv_decode_init
int ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:46
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
U
#define U(x)
Definition: vpx_arith.h:37
rv34_inter_cbp
static const uint8_t rv34_inter_cbp[NUM_INTER_TABLES][4][CBP_VLC_SIZE]
Definition: rv34vlc.h:2890
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:77
AVCodecContext
main external API structure.
Definition: avcodec.h:445
VLC_INIT_STATIC_OVERLONG
#define VLC_INIT_STATIC_OVERLONG
Definition: vlc.h:183
SliceInfo::start
int start
Definition: rv34.h:79
rv34_decode_inter_mb_header
static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode inter macroblock header and return CBP in case of success, -1 otherwise.
Definition: rv34.c:398
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:437
HOR_UP_PRED
@ HOR_UP_PRED
Definition: vp9.h:54
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
rv34_intra_coeff
static const uint8_t rv34_intra_coeff[NUM_INTRA_TABLES][COEFF_VLC_SIZE]
Definition: rv34vlc.h:2281
error_resilience.h
part_sizes_w
static const uint8_t part_sizes_w[RV34_MB_TYPES]
macroblock partition width in 8x8 blocks
Definition: rv34.c:461
VLC
Definition: vlc.h:36
ittrans
static const int ittrans[9]
mapping of RV30/40 intra prediction types to standard H.264 types
Definition: rv34.c:972
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:826
rv34_chroma_quant
static const uint8_t rv34_chroma_quant[2][32]
quantizer values used for AC and DC coefficients in chroma blocks
Definition: rv34data.h:74
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:395
VLC::table
VLCElem * table
Definition: vlc.h:38
rv34_decode_intra_mb_header
static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
Decode intra macroblock header and return CBP in case of success, -1 otherwise.
Definition: rv34.c:363
HOR_DOWN_PRED
@ HOR_DOWN_PRED
Definition: vp9.h:52
rv34_mb_bits_sizes
static const uint8_t rv34_mb_bits_sizes[6]
bits needed to code the slice offset for the given size
Definition: rv34data.h:111
IS_8X16
#define IS_8X16(a)
Definition: mpegutils.h:83
rv34_process_block
static void rv34_process_block(RV34DecContext *r, uint8_t *pdst, int stride, int fc, int sc, int q_dc, int q_ac)
Definition: rv34.c:1028
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
RV34_MB_P_MIX16x16
@ RV34_MB_P_MIX16x16
P-frame macroblock with DCs in a separate 4x4 block, one motion vector.
Definition: rv34.h:56
rv34vlc.h
VLC::table_size
int table_size
Definition: vlc.h:39
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
mem.h
rv34_mc
static void rv34_mc(RV34DecContext *r, const int block_type, const int xoff, const int yoff, int mv_off, const int width, const int height, int dir, const int thirdpel, int weighted, qpel_mc_func(*qpel_mc)[16], h264_chroma_mc_func(*chroma_mc))
generic motion compensation function
Definition: rv34.c:676
ER_MB_END
#define ER_MB_END
Definition: error_resilience.h:38
MPVWorkPicture
Definition: mpegpicture.h:95
MB_TYPE_SEPARATE_DC
#define MB_TYPE_SEPARATE_DC
Definition: rv34.h:38
RV34_MB_P_16x8
@ RV34_MB_P_16x8
P-frame macroblock, 16x8 motion compensation partitions.
Definition: rv34.h:53
TOP_DC_PRED
@ TOP_DC_PRED
Definition: vp9.h:57
AVPacket
This structure stores compressed data.
Definition: packet.h:510
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
inter_vlcs
static RV34VLC inter_vlcs[NUM_INTER_TABLES]
Definition: rv34.c:75
mpeg_er.h
d
d
Definition: ffmpeg_filter.c:424
DIAG_DOWN_LEFT_PRED
@ DIAG_DOWN_LEFT_PRED
Definition: vp9.h:49
SliceInfo::width
int width
coded width
Definition: rv34.h:80
imgutils.h
MB_TYPE_DIRECT2
#define MB_TYPE_DIRECT2
Definition: mpegutils.h:47
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:482
RV34_MB_P_16x16
@ RV34_MB_P_16x16
P-frame macroblock, one motion frame.
Definition: rv34.h:47
choose_vlc_set
static RV34VLC * choose_vlc_set(int quant, int mod, int type)
Select VLC set for decoding from current quantizer, modifier and frame type.
Definition: rv34.c:351
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
RV34_MB_B_BIDIR
@ RV34_MB_B_BIDIR
Bidirectionally predicted B-frame macroblock, two motion vectors.
Definition: rv34.h:55
modulo_three_table
static const uint8_t modulo_three_table[108]
precalculated results of division by three and modulo three for values 0-107
Definition: rv34data.h:53
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
rv34_gen_vlc
static av_cold void rv34_gen_vlc(const uint8_t *bits, int size, const VLCElem **vlcp, int *offset)
Definition: rv34.c:126
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:216
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:73
MB_TYPE_FORWARD_MV
#define MB_TYPE_FORWARD_MV
Definition: mpegutils.h:50
ff_mpv_decode_close
int ff_mpv_decode_close(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:163
rv34_decoder_free
static void rv34_decoder_free(RV34DecContext *r)
Definition: rv34.c:1377
shifts
static const uint8_t shifts[2][12]
Definition: camellia.c:178
MB_TYPE_INTRA
#define MB_TYPE_INTRA
Definition: mpegutils.h:65
NUM_INTER_TABLES
#define NUM_INTER_TABLES
Definition: rv34vlc.h:33
rv34_pred_b_vector
static void rv34_pred_b_vector(int A[2], int B[2], int C[2], int A_avail, int B_avail, int C_avail, int *mx, int *my)
Predict motion vector for B-frame macroblock.
Definition: rv34.c:541