FFmpeg
mpegvideo_motion.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000,2001 Fabrice Bellard
3  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "config_components.h"
25 
26 #include "libavutil/avassert.h"
27 #include "libavutil/internal.h"
28 #include "libavutil/mem_internal.h"
29 
30 #include "avcodec.h"
31 #include "h261.h"
32 #include "mpegutils.h"
33 #include "mpegvideo.h"
34 #include "mpeg4videodec.h"
35 #include "qpeldsp.h"
36 #include "wmv2.h"
37 
38 static inline int hpel_motion(MpegEncContext *s,
39  uint8_t *dest, uint8_t *src,
40  int src_x, int src_y,
41  const op_pixels_func *pix_op,
42  int motion_x, int motion_y)
43 {
44  int dxy = 0;
45  int emu = 0;
46 
47  src_x += motion_x >> 1;
48  src_y += motion_y >> 1;
49 
50  /* WARNING: do no forget half pels */
51  src_x = av_clip(src_x, -16, s->width); // FIXME unneeded for emu?
52  if (src_x != s->width)
53  dxy |= motion_x & 1;
54  src_y = av_clip(src_y, -16, s->height);
55  if (src_y != s->height)
56  dxy |= (motion_y & 1) << 1;
57  src += src_y * s->linesize + src_x;
58 
59  if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 7, 0) ||
60  (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 1) - 7, 0)) {
61  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
62  s->linesize, s->linesize,
63  9, 9,
64  src_x, src_y,
65  s->h_edge_pos, s->v_edge_pos);
66  src = s->sc.edge_emu_buffer;
67  emu = 1;
68  }
69  pix_op[dxy](dest, src, s->linesize, 8);
70  return emu;
71 }
72 
73 static av_always_inline
75  uint8_t *dest_y,
76  uint8_t *dest_cb,
77  uint8_t *dest_cr,
78  int field_based,
79  int bottom_field,
80  int field_select,
81  uint8_t *const *ref_picture,
82  const op_pixels_func (*pix_op)[4],
83  int motion_x,
84  int motion_y,
85  int h,
86  int is_mpeg12,
87  int is_16x8,
88  int mb_y)
89 {
90  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
91  int dxy, uvdxy, mx, my, src_x, src_y,
92  uvsrc_x, uvsrc_y, v_edge_pos, block_y_half;
93  ptrdiff_t uvlinesize, linesize;
94 
95  v_edge_pos = s->v_edge_pos >> field_based;
96  linesize = s->cur_pic.linesize[0] << field_based;
97  uvlinesize = s->cur_pic.linesize[1] << field_based;
98  block_y_half = (field_based | is_16x8);
99 
100  dxy = ((motion_y & 1) << 1) | (motion_x & 1);
101  src_x = s->mb_x * 16 + (motion_x >> 1);
102  src_y = (mb_y << (4 - block_y_half)) + (motion_y >> 1);
103 
104  if (!is_mpeg12 && s->out_format == FMT_H263) {
105  if ((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based) {
106  mx = (motion_x >> 1) | (motion_x & 1);
107  my = motion_y >> 1;
108  uvdxy = ((my & 1) << 1) | (mx & 1);
109  uvsrc_x = s->mb_x * 8 + (mx >> 1);
110  uvsrc_y = (mb_y << (3 - block_y_half)) + (my >> 1);
111  } else {
112  uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
113  uvsrc_x = src_x >> 1;
114  uvsrc_y = src_y >> 1;
115  }
116  // Even chroma mv's are full pel in H261
117  } else if (!CONFIG_SMALL && !is_mpeg12 ||
118  CONFIG_SMALL && s->out_format == FMT_H261) {
119  av_assert2(s->out_format == FMT_H261);
120  mx = motion_x / 4;
121  my = motion_y / 4;
122  uvdxy = 0;
123  uvsrc_x = s->mb_x * 8 + mx;
124  uvsrc_y = mb_y * 8 + my;
125  } else {
126  av_assert2(s->out_format == FMT_MPEG1);
127  if (s->chroma_y_shift) {
128  mx = motion_x / 2;
129  my = motion_y / 2;
130  uvdxy = ((my & 1) << 1) | (mx & 1);
131  uvsrc_x = s->mb_x * 8 + (mx >> 1);
132  uvsrc_y = (mb_y << (3 - block_y_half)) + (my >> 1);
133  } else {
134  if (s->chroma_x_shift) {
135  // Chroma422
136  mx = motion_x / 2;
137  uvdxy = ((motion_y & 1) << 1) | (mx & 1);
138  uvsrc_x = s->mb_x * 8 + (mx >> 1);
139  uvsrc_y = src_y;
140  } else {
141  // Chroma444
142  uvdxy = dxy;
143  uvsrc_x = src_x;
144  uvsrc_y = src_y;
145  }
146  }
147  }
148 
149  ptr_y = ref_picture[0] + src_y * linesize + src_x;
150  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
151  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
152 
153  if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 15 , 0) ||
154  (unsigned)src_y >= FFMAX( v_edge_pos - (motion_y & 1) - h + 1, 0)) {
155  if (is_mpeg12 || (CONFIG_SMALL &&
156  (s->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
157  s->codec_id == AV_CODEC_ID_MPEG1VIDEO))) {
158  av_log(s->avctx, AV_LOG_DEBUG,
159  "MPEG motion vector out of boundary (%d %d)\n", src_x,
160  src_y);
161  return;
162  }
163  src_y = (unsigned)src_y << field_based;
164  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
165  s->linesize, s->linesize,
166  17, 17 + field_based,
167  src_x, src_y,
168  s->h_edge_pos, s->v_edge_pos);
169  ptr_y = s->sc.edge_emu_buffer;
170  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
171  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
172  uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
173  if (s->workaround_bugs & FF_BUG_IEDGE)
174  vbuf -= s->uvlinesize;
175  uvsrc_y = (unsigned)uvsrc_y << field_based;
176  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
177  s->uvlinesize, s->uvlinesize,
178  9, 9 + field_based,
179  uvsrc_x, uvsrc_y,
180  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
181  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
182  s->uvlinesize, s->uvlinesize,
183  9, 9 + field_based,
184  uvsrc_x, uvsrc_y,
185  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
186  ptr_cb = ubuf;
187  ptr_cr = vbuf;
188  }
189  }
190 
191  /* FIXME use this for field pix too instead of the obnoxious hack which
192  * changes picture.data */
193  if (bottom_field) {
194  dest_y += s->linesize;
195  dest_cb += s->uvlinesize;
196  dest_cr += s->uvlinesize;
197  }
198 
199  if (field_select) {
200  ptr_y += s->linesize;
201  ptr_cb += s->uvlinesize;
202  ptr_cr += s->uvlinesize;
203  }
204 
205  pix_op[0][dxy](dest_y, ptr_y, linesize, h);
206 
207  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
208  pix_op[s->chroma_x_shift][uvdxy]
209  (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
210  pix_op[s->chroma_x_shift][uvdxy]
211  (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
212  }
213  if (!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
214  s->out_format == FMT_H261) {
216  }
217 }
218 /* apply one mpeg motion vector to the three components */
220  uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
221  int field_select, uint8_t *const *ref_picture,
222  const op_pixels_func (*pix_op)[4],
223  int motion_x, int motion_y, int h, int is_16x8, int mb_y)
224 {
225 #if !CONFIG_SMALL
226  if (s->out_format == FMT_MPEG1)
227  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
228  field_select, ref_picture, pix_op,
229  motion_x, motion_y, h, 1, is_16x8, mb_y);
230  else
231 #endif
232  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
233  field_select, ref_picture, pix_op,
234  motion_x, motion_y, h, 0, is_16x8, mb_y);
235 }
236 
/* Motion-compensate one field (8 luma lines, field_based = 1 so the strides
 * are doubled) of a field-predicted macroblock in a frame picture.
 * Like mpeg_motion(), selects the MPEG-1/2 specialization of
 * mpeg_motion_internal() at compile time in full builds. */
static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y,
                              uint8_t *dest_cb, uint8_t *dest_cr,
                              int bottom_field, int field_select,
                              uint8_t *const *ref_picture,
                              const op_pixels_func (*pix_op)[4],
                              int motion_x, int motion_y, int mb_y)
{
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1)
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, 8, 1, 0, mb_y);
    else
#endif
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, 8, 0, 0, mb_y);
}
255 
// FIXME: SIMDify, avg variant, 16x16 version
/* Blend the five 8x8 predictions of one OBMC block into dst.
 * src[0] is the prediction from this block's own MV, src[1..4] those from
 * the top/left/right/bottom neighbour MVs. Each output pixel is a weighted
 * sum (weights t,l,m,r,b summing to 8, rounded with +4, >>3); the weight
 * tables below favour the neighbour a pixel is closest to. Rows/pixels not
 * written by a macro call in a given row were already produced by an
 * OBMC_FILTER4 (which writes a 2x2 group spanning two rows). */
static inline void put_obmc(uint8_t *dst, uint8_t *const src[5], int stride)
{
    int x;
    uint8_t *const top    = src[1];
    uint8_t *const left   = src[2];
    uint8_t *const mid    = src[0];
    uint8_t *const right  = src[3];
    uint8_t *const bottom = src[4];
/* one pixel: weighted 5-tap blend, weights sum to 8 */
#define OBMC_FILTER(x, t, l, m, r, b)\
    dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
/* 2x2 group of pixels with identical weights */
#define OBMC_FILTER4(x, t, l, m, r, b)\
    OBMC_FILTER(x         , t, l, m, r, b);\
    OBMC_FILTER(x+1       , t, l, m, r, b);\
    OBMC_FILTER(x  +stride, t, l, m, r, b);\
    OBMC_FILTER(x+1+stride, t, l, m, r, b);

    x = 0;
    OBMC_FILTER (x    , 2, 2, 4, 0, 0);
    OBMC_FILTER (x + 1, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 2, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 4, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 6, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 7, 2, 0, 4, 2, 0);
    x += stride;
    OBMC_FILTER (x    , 1, 2, 5, 0, 0);
    OBMC_FILTER (x + 1, 1, 2, 5, 0, 0);
    OBMC_FILTER (x + 6, 1, 0, 5, 2, 0);
    OBMC_FILTER (x + 7, 1, 0, 5, 2, 0);
    x += stride;
    OBMC_FILTER4(x    , 1, 2, 5, 0, 0);
    OBMC_FILTER4(x + 2, 1, 1, 6, 0, 0);
    OBMC_FILTER4(x + 4, 1, 0, 6, 1, 0);
    OBMC_FILTER4(x + 6, 1, 0, 5, 2, 0);
    x += 2 * stride;
    OBMC_FILTER4(x    , 0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 6, 0, 1);
    OBMC_FILTER4(x + 4, 0, 0, 6, 1, 1);
    OBMC_FILTER4(x + 6, 0, 0, 5, 2, 1);
    x += 2*stride;
    OBMC_FILTER (x    , 0, 2, 5, 0, 1);
    OBMC_FILTER (x + 1, 0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 5, 0, 2);
    OBMC_FILTER4(x + 4, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 2, 1);
    OBMC_FILTER (x + 7, 0, 0, 5, 2, 1);
    x += stride;
    OBMC_FILTER (x    , 0, 2, 4, 0, 2);
    OBMC_FILTER (x + 1, 0, 1, 5, 0, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 7, 0, 0, 4, 2, 2);
}
308 
309 /* obmc for 1 8x8 luma block */
310 static inline void obmc_motion(MpegEncContext *s,
311  uint8_t *dest, uint8_t *src,
312  int src_x, int src_y,
313  const op_pixels_func *pix_op,
314  int16_t mv[5][2] /* mid top left right bottom */)
315 #define MID 0
316 {
317  int i;
318  uint8_t *ptr[5];
319 
320  av_assert2(s->quarter_sample == 0);
321 
322  for (i = 0; i < 5; i++) {
323  if (i && mv[i][0] == mv[MID][0] && mv[i][1] == mv[MID][1]) {
324  ptr[i] = ptr[MID];
325  } else {
326  ptr[i] = s->sc.obmc_scratchpad + 8 * (i & 1) +
327  s->linesize * 8 * (i >> 1);
328  hpel_motion(s, ptr[i], src, src_x, src_y, pix_op,
329  mv[i][0], mv[i][1]);
330  }
331  }
332 
333  put_obmc(dest, ptr, s->linesize);
334 }
335 
/**
 * Quarter-pel motion compensation of one macroblock (MPEG-4 qpel mode).
 * Luma uses true quarter-pel interpolation via qpix_op; the chroma vector is
 * derived from the luma vector (with optional encoder-bug workarounds) and
 * predicted with half-pel interpolation via pix_op.
 *
 * @param field_based  1 when predicting a single field (strides doubled)
 * @param bottom_field write into the bottom field of dest
 * @param field_select read from the bottom field of the reference
 * @param motion_x,motion_y luma MV in quarter-pel units
 * @param h            number of luma lines to predict
 */
static inline void qpel_motion(MpegEncContext *s,
                               uint8_t *dest_y,
                               uint8_t *dest_cb,
                               uint8_t *dest_cr,
                               int field_based, int bottom_field,
                               int field_select, uint8_t *const *ref_picture,
                               const op_pixels_func (*pix_op)[4],
                               const qpel_mc_func (*qpix_op)[16],
                               int motion_x, int motion_y, int h)
{
    const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos;
    ptrdiff_t linesize, uvlinesize;

    /* luma quarter-pel sub-position (4x4 grid) and integer source coords */
    dxy = ((motion_y & 3) << 2) | (motion_x & 3);

    src_x = s->mb_x * 16 + (motion_x >> 2);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->linesize << field_based;
    uvlinesize = s->uvlinesize << field_based;

    /* derive the half-pel chroma MV; several paths reproduce known encoder
     * bugs so that streams produced by those encoders decode correctly */
    if (field_based) {
        mx = motion_x / 2;
        my = motion_y >> 1;
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA2) {
        static const int rtab[8] = { 0, 0, 1, 1, 0, 0, 0, 1 };
        mx = (motion_x >> 1) + rtab[motion_x & 7];
        my = (motion_y >> 1) + rtab[motion_y & 7];
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA) {
        mx = (motion_x >> 1) | (motion_x & 1);
        my = (motion_y >> 1) | (motion_y & 1);
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
    }
    mx = (mx >> 1) | (mx & 1);
    my = (my >> 1) | (my & 1);

    uvdxy = (mx & 1) | ((my & 1) << 1);
    mx >>= 1;
    my >>= 1;

    uvsrc_x = s->mb_x * 8 + mx;
    uvsrc_y = s->mb_y * (8 >> field_based) + my;

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* edge emulation when the 17x(17+field) luma read area leaves the
     * replicated picture border */
    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 15   , 0) ||
        (unsigned)src_y >= FFMAX(   v_edge_pos - (motion_y & 3) - h + 1, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                 s->linesize, s->linesize,
                                 17, 17 + field_based,
                                 src_x, src_y * (1 << field_based),
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->sc.edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
            if (s->workaround_bugs & FF_BUG_IEDGE)
                vbuf -= s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    if (!field_based)
        qpix_op[0][dxy](dest_y, ptr_y, linesize);
    else {
        if (bottom_field) {
            dest_y  += s->linesize;
            dest_cb += s->uvlinesize;
            dest_cr += s->uvlinesize;
        }

        if (field_select) {
            ptr_y  += s->linesize;
            ptr_cb += s->uvlinesize;
            ptr_cr += s->uvlinesize;
        }
        // damn interlaced mode
        // FIXME boundary mirroring is not exactly correct here
        qpix_op[1][dxy](dest_y, ptr_y, linesize);
        qpix_op[1][dxy](dest_y + 8, ptr_y + 8, linesize);
    }
    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
        pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
    }
}
439 
440 /**
441  * H.263 chroma 4mv motion compensation.
442  */
444  uint8_t *dest_cb, uint8_t *dest_cr,
445  uint8_t *const *ref_picture,
446  const op_pixels_func *pix_op,
447  int mx, int my)
448 {
449  const uint8_t *ptr;
450  int src_x, src_y, dxy, emu = 0;
451  ptrdiff_t offset;
452 
453  /* In case of 8X8, we construct a single chroma motion vector
454  * with a special rounding */
457 
458  dxy = ((my & 1) << 1) | (mx & 1);
459  mx >>= 1;
460  my >>= 1;
461 
462  src_x = s->mb_x * 8 + mx;
463  src_y = s->mb_y * 8 + my;
464  src_x = av_clip(src_x, -8, (s->width >> 1));
465  if (src_x == (s->width >> 1))
466  dxy &= ~1;
467  src_y = av_clip(src_y, -8, (s->height >> 1));
468  if (src_y == (s->height >> 1))
469  dxy &= ~2;
470 
471  offset = src_y * s->uvlinesize + src_x;
472  ptr = ref_picture[1] + offset;
473  if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - (dxy & 1) - 7, 0) ||
474  (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - (dxy >> 1) - 7, 0)) {
475  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
476  s->uvlinesize, s->uvlinesize,
477  9, 9, src_x, src_y,
478  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
479  ptr = s->sc.edge_emu_buffer;
480  emu = 1;
481  }
482  pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);
483 
484  ptr = ref_picture[2] + offset;
485  if (emu) {
486  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
487  s->uvlinesize, s->uvlinesize,
488  9, 9, src_x, src_y,
489  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
490  ptr = s->sc.edge_emu_buffer;
491  }
492  pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
493 }
494 
495 static inline void prefetch_motion(MpegEncContext *s, uint8_t *const *pix, int dir)
496 {
497  /* fetch pixels for estimated mv 4 macroblocks ahead
498  * optimized for 64byte cache lines */
499  const int shift = s->quarter_sample ? 2 : 1;
500  const int mx = (s->mv[dir][0][0] >> shift) + 16 * s->mb_x + 8;
501  const int my = (s->mv[dir][0][1] >> shift) + 16 * s->mb_y;
502  int off = mx + (my + (s->mb_x & 3) * 4) * s->linesize + 64;
503 
504  s->vdsp.prefetch(pix[0] + off, s->linesize, 4);
505  off = (mx >> 1) + ((my >> 1) + (s->mb_x & 7)) * s->uvlinesize + 64;
506  s->vdsp.prefetch(pix[1] + off, pix[2] - pix[1], 2);
507 }
508 
/* Overlapped block motion compensation (H.263 Annex F style) of one
 * macroblock. Gathers the MVs of this MB's four 8x8 blocks and of the
 * available neighbour blocks into a 4x4 mv_cache (row 0 = above, rows 1-2 =
 * this MB, row 3 duplicates row 2 since the MB row below is not yet
 * decoded), then runs obmc_motion() per 8x8 luma block and a single derived
 * chroma vector through chroma_4mv_motion(). */
static inline void apply_obmc(MpegEncContext *s,
                              uint8_t *dest_y,
                              uint8_t *dest_cb,
                              uint8_t *dest_cr,
                              uint8_t *const *ref_picture,
                              const op_pixels_func (*pix_op)[4])
{
    LOCAL_ALIGNED_8(int16_t, mv_cache, [4], [4][2]);
    const MPVWorkPicture *cur_frame = &s->cur_pic;
    int mb_x = s->mb_x;
    int mb_y = s->mb_y;
    const int xy         = mb_x + mb_y * s->mb_stride;
    const int mot_stride = s->b8_stride;
    const int mot_xy     = mb_x * 2 + mb_y * 2 * mot_stride;
    int mx, my, i;

    av_assert2(!s->mb_skipped);

    /* this MB's own four block MVs (cache rows 1 and 2) */
    AV_COPY32(mv_cache[1][1], cur_frame->motion_val[0][mot_xy]);
    AV_COPY32(mv_cache[1][2], cur_frame->motion_val[0][mot_xy + 1]);

    AV_COPY32(mv_cache[2][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[2][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    /* row 3 (below): intentionally the same as row 2 — the MB row below
     * is not available yet */
    AV_COPY32(mv_cache[3][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[3][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    /* row 0 (above): use the neighbour MVs unless missing or intra */
    if (mb_y == 0 || IS_INTRA(cur_frame->mb_type[xy - s->mb_stride])) {
        AV_COPY32(mv_cache[0][1], mv_cache[1][1]);
        AV_COPY32(mv_cache[0][2], mv_cache[1][2]);
    } else {
        AV_COPY32(mv_cache[0][1],
                  cur_frame->motion_val[0][mot_xy - mot_stride]);
        AV_COPY32(mv_cache[0][2],
                  cur_frame->motion_val[0][mot_xy - mot_stride + 1]);
    }

    /* column 0 (left neighbour) */
    if (mb_x == 0 || IS_INTRA(cur_frame->mb_type[xy - 1])) {
        AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
        AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
    } else {
        AV_COPY32(mv_cache[1][0], cur_frame->motion_val[0][mot_xy - 1]);
        AV_COPY32(mv_cache[2][0],
                  cur_frame->motion_val[0][mot_xy - 1 + mot_stride]);
    }

    /* column 3 (right neighbour) */
    if (mb_x + 1 >= s->mb_width || IS_INTRA(cur_frame->mb_type[xy + 1])) {
        AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
        AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
    } else {
        AV_COPY32(mv_cache[1][3], cur_frame->motion_val[0][mot_xy + 2]);
        AV_COPY32(mv_cache[2][3],
                  cur_frame->motion_val[0][mot_xy + 2 + mot_stride]);
    }

    mx = 0;
    my = 0;
    for (i = 0; i < 4; i++) {
        const int x = (i & 1) + 1;
        const int y = (i >> 1) + 1;
        /* mid, top, left, right, bottom — the order obmc_motion() expects */
        int16_t mv[5][2] = {
            { mv_cache[y][x][0],     mv_cache[y][x][1]     },
            { mv_cache[y - 1][x][0], mv_cache[y - 1][x][1] },
            { mv_cache[y][x - 1][0], mv_cache[y][x - 1][1] },
            { mv_cache[y][x + 1][0], mv_cache[y][x + 1][1] },
            { mv_cache[y + 1][x][0], mv_cache[y + 1][x][1] }
        };
        // FIXME cleanup
        obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                    ref_picture[0],
                    mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >> 1) * 8,
                    pix_op[1],
                    mv);

        /* accumulate the block MVs for the derived chroma vector */
        mx += mv[0][0];
        my += mv[0][1];
    }
    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
        chroma_4mv_motion(s, dest_cb, dest_cr,
                          ref_picture, pix_op[1],
                          mx, my);
}
595 
/* Motion compensation of a 4MV macroblock: one MV per 8x8 luma block, either
 * quarter-pel (MPEG-4 qpel) or half-pel, plus one chroma vector derived from
 * the sum of the four luma vectors via chroma_4mv_motion(). */
static inline void apply_8x8(MpegEncContext *s,
                             uint8_t *dest_y,
                             uint8_t *dest_cb,
                             uint8_t *dest_cr,
                             int dir,
                             uint8_t *const *ref_picture,
                             const qpel_mc_func (*qpix_op)[16],
                             const op_pixels_func (*pix_op)[4])
{
    int dxy, mx, my, src_x, src_y;
    int i;
    int mb_x = s->mb_x;
    int mb_y = s->mb_y;
    uint8_t *dest;
    const uint8_t *ptr;

    mx = 0;
    my = 0;
    if (s->quarter_sample) {
        for (i = 0; i < 4; i++) {
            int motion_x = s->mv[dir][i][0];
            int motion_y = s->mv[dir][i][1];

            /* quarter-pel sub-position (4x4 grid) and source coordinates */
            dxy   = ((motion_y & 3) << 2) | (motion_x & 3);
            src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
            src_y = mb_y * 16 + (motion_y >> 2) + (i >> 1) * 8;

            /* WARNING: do not forget half pels */
            src_x = av_clip(src_x, -16, s->width);
            if (src_x == s->width)
                dxy &= ~3;
            src_y = av_clip(src_y, -16, s->height);
            if (src_y == s->height)
                dxy &= ~12;

            ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
            /* edge emulation when the 9x9 read area crosses the border */
            if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 7, 0) ||
                (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 3) - 7, 0)) {
                s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                         s->linesize, s->linesize,
                                         9, 9,
                                         src_x, src_y,
                                         s->h_edge_pos,
                                         s->v_edge_pos);
                ptr = s->sc.edge_emu_buffer;
            }
            dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
            qpix_op[1][dxy](dest, ptr, s->linesize);

            /* halve to half-pel units before summing for chroma */
            mx += s->mv[dir][i][0] / 2;
            my += s->mv[dir][i][1] / 2;
        }
    } else {
        for (i = 0; i < 4; i++) {
            hpel_motion(s,
                        dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                        ref_picture[0],
                        mb_x * 16 + (i & 1) * 8,
                        mb_y * 16 + (i >> 1) * 8,
                        pix_op[1],
                        s->mv[dir][i][0],
                        s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }
    }

    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
        chroma_4mv_motion(s, dest_cb, dest_cr,
                          ref_picture, pix_op[1], mx, my);
}
668 
669 /**
670  * motion compensation of a single macroblock
671  * @param s context
672  * @param dest_y luma destination pointer
673  * @param dest_cb chroma cb/u destination pointer
674  * @param dest_cr chroma cr/v destination pointer
675  * @param dir direction (0->forward, 1->backward)
676  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
677  * @param pix_op halfpel motion compensation function (average or put normally)
678  * @param qpix_op qpel motion compensation function (average or put normally)
679  * the motion vectors are taken from s->mv and the MV type from s->mv_type
680  */
682  uint8_t *dest_y,
683  uint8_t *dest_cb,
684  uint8_t *dest_cr,
685  int dir,
686  uint8_t *const *ref_picture,
687  const op_pixels_func (*pix_op)[4],
688  const qpel_mc_func (*qpix_op)[16],
689  int is_mpeg12)
690 {
691  int i;
692  int mb_y = s->mb_y;
693 
694  if (!is_mpeg12 && s->obmc && s->pict_type != AV_PICTURE_TYPE_B) {
695  apply_obmc(s, dest_y, dest_cb, dest_cr, ref_picture, pix_op);
696  return;
697  }
698 
699  switch (s->mv_type) {
700  case MV_TYPE_16X16:
701  if (CONFIG_MPEG4_DECODER && !is_mpeg12 && s->mcsel) {
702  ff_mpeg4_mcsel_motion(s, dest_y, dest_cb, dest_cr, ref_picture);
703  } else if (!is_mpeg12 && s->quarter_sample) {
704  qpel_motion(s, dest_y, dest_cb, dest_cr,
705  0, 0, 0,
706  ref_picture, pix_op, qpix_op,
707  s->mv[dir][0][0], s->mv[dir][0][1], 16);
708  } else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
709  s->mspel && s->codec_id == AV_CODEC_ID_WMV2) {
710  ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
711  ref_picture, pix_op,
712  s->mv[dir][0][0], s->mv[dir][0][1], 16);
713  } else {
714  mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
715  ref_picture, pix_op,
716  s->mv[dir][0][0], s->mv[dir][0][1], 16, 0, mb_y);
717  }
718  break;
719  case MV_TYPE_8X8:
720  if (!is_mpeg12)
721  apply_8x8(s, dest_y, dest_cb, dest_cr,
722  dir, ref_picture, qpix_op, pix_op);
723  break;
724  case MV_TYPE_FIELD:
725  // Only MPEG-1/2 can have a picture_structure != PICT_FRAME here.
726  if (!CONFIG_SMALL)
727  av_assert2(is_mpeg12 || s->picture_structure == PICT_FRAME);
728  if ((!CONFIG_SMALL && !is_mpeg12) ||
729  s->picture_structure == PICT_FRAME) {
730  if (!is_mpeg12 && s->quarter_sample) {
731  for (i = 0; i < 2; i++)
732  qpel_motion(s, dest_y, dest_cb, dest_cr,
733  1, i, s->field_select[dir][i],
734  ref_picture, pix_op, qpix_op,
735  s->mv[dir][i][0], s->mv[dir][i][1], 8);
736  } else {
737  /* top field */
738  mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
739  0, s->field_select[dir][0],
740  ref_picture, pix_op,
741  s->mv[dir][0][0], s->mv[dir][0][1], mb_y);
742  /* bottom field */
743  mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
744  1, s->field_select[dir][1],
745  ref_picture, pix_op,
746  s->mv[dir][1][0], s->mv[dir][1][1], mb_y);
747  }
748  } else {
749  av_assert2(s->out_format == FMT_MPEG1);
750  if (s->picture_structure != s->field_select[dir][0] + 1 &&
751  s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
752  ref_picture = s->cur_pic.ptr->f->data;
753  }
754 
755  mpeg_motion(s, dest_y, dest_cb, dest_cr,
756  s->field_select[dir][0],
757  ref_picture, pix_op,
758  s->mv[dir][0][0], s->mv[dir][0][1], 16, 0, mb_y >> 1);
759  }
760  break;
761  case MV_TYPE_16X8:
762  if (CONFIG_SMALL || is_mpeg12) {
763  for (i = 0; i < 2; i++) {
764  uint8_t *const *ref2picture;
765 
766  if (s->picture_structure == s->field_select[dir][i] + 1 ||
767  s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
768  ref2picture = ref_picture;
769  } else {
770  ref2picture = s->cur_pic.ptr->f->data;
771  }
772 
773  mpeg_motion(s, dest_y, dest_cb, dest_cr,
774  s->field_select[dir][i],
775  ref2picture, pix_op,
776  s->mv[dir][i][0], s->mv[dir][i][1],
777  8, 1, (mb_y & ~1) + i);
778 
779  dest_y += 16 * s->linesize;
780  dest_cb += (16 >> s->chroma_y_shift) * s->uvlinesize;
781  dest_cr += (16 >> s->chroma_y_shift) * s->uvlinesize;
782  }
783  break;
784  }
785  case MV_TYPE_DMV:
786  if (CONFIG_SMALL || is_mpeg12) {
787  if (s->picture_structure == PICT_FRAME) {
788  for (i = 0; i < 2; i++) {
789  for (int j = 0; j < 2; j++)
790  mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
791  j, j ^ i, ref_picture, pix_op,
792  s->mv[dir][2 * i + j][0],
793  s->mv[dir][2 * i + j][1], mb_y);
794  pix_op = s->hdsp.avg_pixels_tab;
795  }
796  } else {
797  for (i = 0; i < 2; i++) {
798  mpeg_motion(s, dest_y, dest_cb, dest_cr,
799  s->picture_structure != i + 1,
800  ref_picture, pix_op,
801  s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
802  16, 0, mb_y >> 1);
803 
804  // after put we make avg of the same block
805  pix_op = s->hdsp.avg_pixels_tab;
806 
807  /* opposite parity is always in the same frame if this is
808  * second field */
809  if (!s->first_field)
810  ref_picture = s->cur_pic.ptr->f->data;
811  }
812  }
813  break;
814  }
815  default: av_assert2(0);
816  }
817 }
818 
820  uint8_t *dest_y, uint8_t *dest_cb,
821  uint8_t *dest_cr, int dir,
822  uint8_t *const *ref_picture,
823  const op_pixels_func (*pix_op)[4],
824  const qpel_mc_func (*qpix_op)[16])
825 {
826  av_assert2(s->out_format == FMT_MPEG1 ||
827  s->out_format == FMT_H263 ||
828  s->out_format == FMT_H261);
829  prefetch_motion(s, ref_picture, dir);
830 
831 #if !CONFIG_SMALL
832  if (s->out_format == FMT_MPEG1)
833  mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
834  ref_picture, pix_op, qpix_op, 1);
835  else
836 #endif
837  mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
838  ref_picture, pix_op, qpix_op, 0);
839 }
ff_h263_round_chroma
static int ff_h263_round_chroma(int x)
Definition: motion_est.h:107
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:33
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:265
apply_obmc
static void apply_obmc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4])
Definition: mpegvideo_motion.c:509
av_clip
#define av_clip
Definition: common.h:100
mem_internal.h
qpel_motion
static void qpel_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16], int motion_x, int motion_y, int h)
Definition: mpegvideo_motion.c:336
mpeg4videodec.h
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:267
mv
static const int8_t mv[256][2]
Definition: 4xm.c:81
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:819
MpegEncContext::dest
uint8_t * dest[3]
Definition: mpegvideo.h:294
mpegvideo.h
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
FF_BUG_HPEL_CHROMA
#define FF_BUG_HPEL_CHROMA
Definition: avcodec.h:1370
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:64
mpegutils.h
MPVWorkPicture::mb_type
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:105
MPVWorkPicture::motion_val
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:103
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:269
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:53
put_obmc
static void put_obmc(uint8_t *dst, uint8_t *const src[5], int stride)
Definition: mpegvideo_motion.c:257
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:63
FF_BUG_QPEL_CHROMA2
#define FF_BUG_QPEL_CHROMA2
Definition: avcodec.h:1367
h261.h
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:129
mpeg_motion
static void mpeg_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_select, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], int motion_x, int motion_y, int h, int is_16x8, int mb_y)
Definition: mpegvideo_motion.c:219
chroma_4mv_motion
static void chroma_4mv_motion(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture, const op_pixels_func *pix_op, int mx, int my)
H.263 chroma 4mv motion compensation.
Definition: mpegvideo_motion.c:443
avassert.h
ff_mspel_motion
void ff_mspel_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], int motion_x, int motion_y, int h)
Definition: wmv2.c:50
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
OBMC_FILTER4
#define OBMC_FILTER4(x, t, l, m, r, b)
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
mpeg_motion_field
static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int bottom_field, int field_select, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], int motion_x, int motion_y, int mb_y)
Definition: mpegvideo_motion.c:237
LOCAL_ALIGNED_8
#define LOCAL_ALIGNED_8(t, v,...)
Definition: mem_internal.h:126
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:53
hpel_motion
static int hpel_motion(MpegEncContext *s, uint8_t *dest, uint8_t *src, int src_x, int src_y, const op_pixels_func *pix_op, int motion_x, int motion_y)
Definition: mpegvideo_motion.c:38
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:65
MpegEncContext::field_select
int field_select[2][2]
Definition: mpegvideo.h:276
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:287
FF_BUG_IEDGE
#define FF_BUG_IEDGE
Definition: avcodec.h:1374
qpeldsp.h
apply_8x8
static void apply_8x8(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const qpel_mc_func(*qpix_op)[16], const op_pixels_func(*pix_op)[4])
Definition: mpegvideo_motion.c:596
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
wmv2.h
prefetch_motion
static void prefetch_motion(MpegEncContext *s, uint8_t *const *pix, int dir)
Definition: mpegvideo_motion.c:495
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:266
IS_INTRA
#define IS_INTRA(x, y)
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:322
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
MpegEncContext::v_edge_pos
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:127
OBMC_FILTER
#define OBMC_FILTER(x, t, l, m, r, b)
mpv_motion_internal
static av_always_inline void mpv_motion_internal(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16], int is_mpeg12)
motion compensation of a single macroblock
Definition: mpegvideo_motion.c:681
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:268
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:130
ff_mpeg4_mcsel_motion
void ff_mpeg4_mcsel_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture)
Definition: mpeg4videodec.c:232
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
internal.h
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:287
av_always_inline
#define av_always_inline
Definition: attributes.h:49
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:634
MID
#define MID
FF_BUG_QPEL_CHROMA
#define FF_BUG_QPEL_CHROMA
Definition: avcodec.h:1365
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
ff_h261_loop_filter
void ff_h261_loop_filter(MpegEncContext *s)
Definition: h261.c:61
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
mpeg_motion_internal
static av_always_inline void mpeg_motion_internal(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], int motion_x, int motion_y, int h, int is_mpeg12, int is_16x8, int mb_y)
Definition: mpegvideo_motion.c:74
MPVWorkPicture
Definition: mpegpicture.h:95
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
h
h
Definition: vp9dsp_template.c:2070
obmc_motion
static void obmc_motion(MpegEncContext *s, uint8_t *dest, uint8_t *src, int src_x, int src_y, const op_pixels_func *pix_op, int16_t mv[5][2])
Definition: mpegvideo_motion.c:310
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:73
src
#define src
Definition: vp8dsp.c:248