mpegvideo_motion.c
1 /*
2  * Copyright (c) 2000,2001 Fabrice Bellard
3  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "config_components.h"
25 
26 #include "libavutil/avassert.h"
27 #include "libavutil/internal.h"
28 #include "libavutil/mem_internal.h"
29 
30 #include "avcodec.h"
31 #include "h261.h"
32 #include "h263.h"
33 #include "mpegutils.h"
34 #include "mpegvideo.h"
35 #include "mpeg4videodec.h"
36 #include "qpeldsp.h"
37 #include "wmv2.h"
38 
39 static inline int hpel_motion(MpegEncContext *s,
40  uint8_t *dest, uint8_t *src,
41  int src_x, int src_y,
42  const op_pixels_func *pix_op,
43  int motion_x, int motion_y)
44 {
45  int dxy = 0;
46  int emu = 0;
47 
48  src_x += motion_x >> 1;
49  src_y += motion_y >> 1;
50 
51  /* WARNING: do not forget half pels */
52  src_x = av_clip(src_x, -16, s->width); // FIXME unneeded for emu?
53  if (src_x != s->width)
54  dxy |= motion_x & 1;
55  src_y = av_clip(src_y, -16, s->height);
56  if (src_y != s->height)
57  dxy |= (motion_y & 1) << 1;
58  src += src_y * s->linesize + src_x;
59 
60  if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 7, 0) ||
61  (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 1) - 7, 0)) {
62  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
63  s->linesize, s->linesize,
64  9, 9,
65  src_x, src_y,
66  s->h_edge_pos, s->v_edge_pos);
67  src = s->sc.edge_emu_buffer;
68  emu = 1;
69  }
70  pix_op[dxy](dest, src, s->linesize, 8);
71  return emu;
72 }
73 
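hpel_motion() folds the half-pel fraction of the motion vector into the 2-bit index dxy (bit 0 = horizontal half-pel, bit 1 = vertical half-pel), which selects one of the four op_pixels_func variants; the clamps against s->width/s->height drop the corresponding bit at the picture border so the interpolation never reads past the padded edge. A minimal standalone sketch of that mapping (the helper name and the sample values are illustrative, not part of this file):

/* dxy = 0: plain copy, 1: horizontal half-pel, 2: vertical half-pel,
 * 3: both (2x2 bilinear average), matching the hpeldsp table layout. */
#include <stdio.h>

static int hpel_dxy(int motion_x, int motion_y)
{
    return (motion_x & 1) | ((motion_y & 1) << 1);
}

int main(void)
{
    /* MV (+3, -1) in half-pel units: integer part (1, -1),
     * half-pel fraction in both directions -> dxy == 3 */
    printf("dxy = %d\n", hpel_dxy(3, -1));
    return 0;
}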
74 static av_always_inline
75 void mpeg_motion_internal(MpegEncContext *s,
76  uint8_t *dest_y,
77  uint8_t *dest_cb,
78  uint8_t *dest_cr,
79  int field_based,
80  int bottom_field,
81  int field_select,
82  uint8_t *const *ref_picture,
83  const op_pixels_func (*pix_op)[4],
84  int motion_x,
85  int motion_y,
86  int h,
87  int is_mpeg12,
88  int is_16x8,
89  int mb_y)
90 {
91  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
92  int dxy, uvdxy, mx, my, src_x, src_y,
93  uvsrc_x, uvsrc_y, v_edge_pos, block_y_half;
94  ptrdiff_t uvlinesize, linesize;
95 
96  v_edge_pos = s->v_edge_pos >> field_based;
97  linesize = s->cur_pic.linesize[0] << field_based;
98  uvlinesize = s->cur_pic.linesize[1] << field_based;
99  block_y_half = (field_based | is_16x8);
100 
101  dxy = ((motion_y & 1) << 1) | (motion_x & 1);
102  src_x = s->mb_x * 16 + (motion_x >> 1);
103  src_y = (mb_y << (4 - block_y_half)) + (motion_y >> 1);
104 
105  if (!is_mpeg12 && s->out_format == FMT_H263) {
106  if ((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based) {
107  mx = (motion_x >> 1) | (motion_x & 1);
108  my = motion_y >> 1;
109  uvdxy = ((my & 1) << 1) | (mx & 1);
110  uvsrc_x = s->mb_x * 8 + (mx >> 1);
111  uvsrc_y = (mb_y << (3 - block_y_half)) + (my >> 1);
112  } else {
113  uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
114  uvsrc_x = src_x >> 1;
115  uvsrc_y = src_y >> 1;
116  }
117  // Even chroma MVs are full-pel in H.261
118  } else if (!CONFIG_SMALL && !is_mpeg12 ||
119  CONFIG_SMALL && s->out_format == FMT_H261) {
120  av_assert2(s->out_format == FMT_H261);
121  mx = motion_x / 4;
122  my = motion_y / 4;
123  uvdxy = 0;
124  uvsrc_x = s->mb_x * 8 + mx;
125  uvsrc_y = mb_y * 8 + my;
126  } else {
127  av_assert2(s->out_format == FMT_MPEG1);
128  if (s->chroma_y_shift) {
129  mx = motion_x / 2;
130  my = motion_y / 2;
131  uvdxy = ((my & 1) << 1) | (mx & 1);
132  uvsrc_x = s->mb_x * 8 + (mx >> 1);
133  uvsrc_y = (mb_y << (3 - block_y_half)) + (my >> 1);
134  } else {
135  if (s->chroma_x_shift) {
136  // Chroma422
137  mx = motion_x / 2;
138  uvdxy = ((motion_y & 1) << 1) | (mx & 1);
139  uvsrc_x = s->mb_x * 8 + (mx >> 1);
140  uvsrc_y = src_y;
141  } else {
142  // Chroma444
143  uvdxy = dxy;
144  uvsrc_x = src_x;
145  uvsrc_y = src_y;
146  }
147  }
148  }
149 
150  ptr_y = ref_picture[0] + src_y * linesize + src_x;
151  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
152  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
153 
154  if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 15 , 0) ||
155  (unsigned)src_y >= FFMAX( v_edge_pos - (motion_y & 1) - h + 1, 0)) {
156  if (is_mpeg12 || (CONFIG_SMALL &&
157  (s->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
158  s->codec_id == AV_CODEC_ID_MPEG1VIDEO))) {
159  av_log(s->avctx, AV_LOG_DEBUG,
160  "MPEG motion vector out of boundary (%d %d)\n", src_x,
161  src_y);
162  return;
163  }
164  src_y = (unsigned)src_y << field_based;
165  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
166  s->linesize, s->linesize,
167  17, 17 + field_based,
168  src_x, src_y,
169  s->h_edge_pos, s->v_edge_pos);
170  ptr_y = s->sc.edge_emu_buffer;
171  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
172  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
173  uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
174  if (s->workaround_bugs & FF_BUG_IEDGE)
175  vbuf -= s->uvlinesize;
176  uvsrc_y = (unsigned)uvsrc_y << field_based;
177  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
178  s->uvlinesize, s->uvlinesize,
179  9, 9 + field_based,
180  uvsrc_x, uvsrc_y,
181  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
182  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
183  s->uvlinesize, s->uvlinesize,
184  9, 9 + field_based,
185  uvsrc_x, uvsrc_y,
186  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
187  ptr_cb = ubuf;
188  ptr_cr = vbuf;
189  }
190  }
191 
192  /* FIXME use this for field pix too instead of the obnoxious hack which
193  * changes picture.data */
194  if (bottom_field) {
195  dest_y += s->linesize;
196  dest_cb += s->uvlinesize;
197  dest_cr += s->uvlinesize;
198  }
199 
200  if (field_select) {
201  ptr_y += s->linesize;
202  ptr_cb += s->uvlinesize;
203  ptr_cr += s->uvlinesize;
204  }
205 
206  pix_op[0][dxy](dest_y, ptr_y, linesize, h);
207 
208  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
209  pix_op[s->chroma_x_shift][uvdxy]
210  (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
211  pix_op[s->chroma_x_shift][uvdxy]
212  (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
213  }
214  if (!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
215  s->out_format == FMT_H261) {
216  ff_h261_loop_filter(s);
217  }
218 }
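The chroma handling above splits three ways on the MPEG-1/2 path: with 4:2:0 both MV components are halved and the leftover half-pel bit goes into uvdxy, with 4:2:2 only the horizontal component is halved, and with 4:4:4 the chroma planes reuse the luma position and dxy unchanged. A simplified, self-contained sketch of that derivation (it ignores the field/16x8 handling and uses illustrative names):

/* Simplified model of the MPEG-1/2 chroma MV derivation in
 * mpeg_motion_internal(); chroma_x_shift/chroma_y_shift of (1,1) is
 * 4:2:0, (1,0) is 4:2:2, (0,0) is 4:4:4. */
typedef struct {
    int uvdxy;             /* half-pel interpolation index for chroma */
    int uvsrc_x, uvsrc_y;  /* chroma source position in chroma samples */
} ChromaMC;

static ChromaMC chroma_from_luma(int motion_x, int motion_y, int dxy,
                                 int src_x, int src_y, int mb_x, int mb_y,
                                 int chroma_x_shift, int chroma_y_shift)
{
    ChromaMC c;
    if (chroma_y_shift) {          /* 4:2:0 */
        int mx = motion_x / 2, my = motion_y / 2;
        c.uvdxy   = ((my & 1) << 1) | (mx & 1);
        c.uvsrc_x = mb_x * 8 + (mx >> 1);
        c.uvsrc_y = mb_y * 8 + (my >> 1);
    } else if (chroma_x_shift) {   /* 4:2:2 */
        int mx = motion_x / 2;
        c.uvdxy   = ((motion_y & 1) << 1) | (mx & 1);
        c.uvsrc_x = mb_x * 8 + (mx >> 1);
        c.uvsrc_y = src_y;
    } else {                       /* 4:4:4 */
        c.uvdxy   = dxy;
        c.uvsrc_x = src_x;
        c.uvsrc_y = src_y;
    }
    return c;
}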
219 /* apply one mpeg motion vector to the three components */
220 static void mpeg_motion(MpegEncContext *s,
221  uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
222  int field_select, uint8_t *const *ref_picture,
223  const op_pixels_func (*pix_op)[4],
224  int motion_x, int motion_y, int h, int is_16x8, int mb_y)
225 {
226 #if !CONFIG_SMALL
227  if (s->out_format == FMT_MPEG1)
228  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
229  field_select, ref_picture, pix_op,
230  motion_x, motion_y, h, 1, is_16x8, mb_y);
231  else
232 #endif
233  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
234  field_select, ref_picture, pix_op,
235  motion_x, motion_y, h, 0, is_16x8, mb_y);
236 }
237 
238 static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y,
239  uint8_t *dest_cb, uint8_t *dest_cr,
240  int bottom_field, int field_select,
241  uint8_t *const *ref_picture,
242  const op_pixels_func (*pix_op)[4],
243  int motion_x, int motion_y, int mb_y)
244 {
245 #if !CONFIG_SMALL
246  if (s->out_format == FMT_MPEG1)
247  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
248  bottom_field, field_select, ref_picture, pix_op,
249  motion_x, motion_y, 8, 1, 0, mb_y);
250  else
251 #endif
252  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
253  bottom_field, field_select, ref_picture, pix_op,
254  motion_x, motion_y, 8, 0, 0, mb_y);
255 }
256 
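mpeg_motion() and mpeg_motion_field() exist mainly to pass is_mpeg12 as a compile-time constant: when CONFIG_SMALL is off, the av_always_inline body of mpeg_motion_internal() is instantiated once per value and the compiler drops every branch the constant rules out. A generic, self-contained illustration of that specialization pattern (the function names below are made up for the example; the real code uses av_always_inline):

/* "Constant flag + forced inline" specialization, the same trick the
 * wrappers above use; mc_internal stands in for mpeg_motion_internal. */
static inline int mc_internal(int x, int is_mpeg12)
{
    if (is_mpeg12)   /* folded to a constant in each specialized caller */
        return x * 2;
    return x * 3;
}

int mc_mpeg12(int x) { return mc_internal(x, 1); }
int mc_h263(int x)   { return mc_internal(x, 0); }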
257 // FIXME: SIMDify, avg variant, 16x16 version
258 static inline void put_obmc(uint8_t *dst, uint8_t *const src[5], int stride)
259 {
260  int x;
261  uint8_t *const top = src[1];
262  uint8_t *const left = src[2];
263  uint8_t *const mid = src[0];
264  uint8_t *const right = src[3];
265  uint8_t *const bottom = src[4];
266 #define OBMC_FILTER(x, t, l, m, r, b)\
267  dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
268 #define OBMC_FILTER4(x, t, l, m, r, b)\
269  OBMC_FILTER(x , t, l, m, r, b);\
270  OBMC_FILTER(x+1 , t, l, m, r, b);\
271  OBMC_FILTER(x +stride, t, l, m, r, b);\
272  OBMC_FILTER(x+1+stride, t, l, m, r, b);
273 
274  x = 0;
275  OBMC_FILTER (x , 2, 2, 4, 0, 0);
276  OBMC_FILTER (x + 1, 2, 1, 5, 0, 0);
277  OBMC_FILTER4(x + 2, 2, 1, 5, 0, 0);
278  OBMC_FILTER4(x + 4, 2, 0, 5, 1, 0);
279  OBMC_FILTER (x + 6, 2, 0, 5, 1, 0);
280  OBMC_FILTER (x + 7, 2, 0, 4, 2, 0);
281  x += stride;
282  OBMC_FILTER (x , 1, 2, 5, 0, 0);
283  OBMC_FILTER (x + 1, 1, 2, 5, 0, 0);
284  OBMC_FILTER (x + 6, 1, 0, 5, 2, 0);
285  OBMC_FILTER (x + 7, 1, 0, 5, 2, 0);
286  x += stride;
287  OBMC_FILTER4(x , 1, 2, 5, 0, 0);
288  OBMC_FILTER4(x + 2, 1, 1, 6, 0, 0);
289  OBMC_FILTER4(x + 4, 1, 0, 6, 1, 0);
290  OBMC_FILTER4(x + 6, 1, 0, 5, 2, 0);
291  x += 2 * stride;
292  OBMC_FILTER4(x , 0, 2, 5, 0, 1);
293  OBMC_FILTER4(x + 2, 0, 1, 6, 0, 1);
294  OBMC_FILTER4(x + 4, 0, 0, 6, 1, 1);
295  OBMC_FILTER4(x + 6, 0, 0, 5, 2, 1);
296  x += 2*stride;
297  OBMC_FILTER (x , 0, 2, 5, 0, 1);
298  OBMC_FILTER (x + 1, 0, 2, 5, 0, 1);
299  OBMC_FILTER4(x + 2, 0, 1, 5, 0, 2);
300  OBMC_FILTER4(x + 4, 0, 0, 5, 1, 2);
301  OBMC_FILTER (x + 6, 0, 0, 5, 2, 1);
302  OBMC_FILTER (x + 7, 0, 0, 5, 2, 1);
303  x += stride;
304  OBMC_FILTER (x , 0, 2, 4, 0, 2);
305  OBMC_FILTER (x + 1, 0, 1, 5, 0, 2);
306  OBMC_FILTER (x + 6, 0, 0, 5, 1, 2);
307  OBMC_FILTER (x + 7, 0, 0, 4, 2, 2);
308 }
309 
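Every OBMC_FILTER tap set used in put_obmc() sums to 8, so the blend (t*top + l*left + m*mid + r*right + b*bottom + 4) >> 3 is a weighted average with round-to-nearest; a corner pixel, for instance, mixes 2/8 of the top prediction, 2/8 of the left one and 4/8 of the block's own prediction. A tiny self-contained check of that arithmetic (the sample values are arbitrary):

/* OBMC blend for one pixel: weighted average of five overlapping
 * predictions, weights summing to 8, +4 for rounding. */
#include <stdio.h>

static int obmc_blend(int t, int l, int m, int r, int b,
                      int top, int left, int mid, int right, int bottom)
{
    return (t * top + l * left + m * mid + r * right + b * bottom + 4) >> 3;
}

int main(void)
{
    /* (2*100 + 2*80 + 4*90 + 4) >> 3 = 724 >> 3 = 90 */
    printf("%d\n", obmc_blend(2, 2, 4, 0, 0, 100, 80, 90, 0, 0));
    return 0;
}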
310 /* obmc for 1 8x8 luma block */
311 static inline void obmc_motion(MpegEncContext *s,
312  uint8_t *dest, uint8_t *src,
313  int src_x, int src_y,
314  const op_pixels_func *pix_op,
315  int16_t mv[5][2] /* mid top left right bottom */)
316 #define MID 0
317 {
318  int i;
319  uint8_t *ptr[5];
320 
321  av_assert2(s->quarter_sample == 0);
322 
323  for (i = 0; i < 5; i++) {
324  if (i && mv[i][0] == mv[MID][0] && mv[i][1] == mv[MID][1]) {
325  ptr[i] = ptr[MID];
326  } else {
327  ptr[i] = s->sc.obmc_scratchpad + 8 * (i & 1) +
328  s->linesize * 8 * (i >> 1);
329  hpel_motion(s, ptr[i], src, src_x, src_y, pix_op,
330  mv[i][0], mv[i][1]);
331  }
332  }
333 
334  put_obmc(dest, ptr, s->linesize);
335 }
336 
337 static inline void qpel_motion(MpegEncContext *s,
338  uint8_t *dest_y,
339  uint8_t *dest_cb,
340  uint8_t *dest_cr,
341  int field_based, int bottom_field,
342  int field_select, uint8_t *const *ref_picture,
343  const op_pixels_func (*pix_op)[4],
344  const qpel_mc_func (*qpix_op)[16],
345  int motion_x, int motion_y, int h)
346 {
347  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
348  int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos;
349  ptrdiff_t linesize, uvlinesize;
350 
351  dxy = ((motion_y & 3) << 2) | (motion_x & 3);
352 
353  src_x = s->mb_x * 16 + (motion_x >> 2);
354  src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);
355 
356  v_edge_pos = s->v_edge_pos >> field_based;
357  linesize = s->linesize << field_based;
358  uvlinesize = s->uvlinesize << field_based;
359 
360  if (field_based) {
361  mx = motion_x / 2;
362  my = motion_y >> 1;
363  } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA2) {
364  static const int rtab[8] = { 0, 0, 1, 1, 0, 0, 0, 1 };
365  mx = (motion_x >> 1) + rtab[motion_x & 7];
366  my = (motion_y >> 1) + rtab[motion_y & 7];
367  } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA) {
368  mx = (motion_x >> 1) | (motion_x & 1);
369  my = (motion_y >> 1) | (motion_y & 1);
370  } else {
371  mx = motion_x / 2;
372  my = motion_y / 2;
373  }
374  mx = (mx >> 1) | (mx & 1);
375  my = (my >> 1) | (my & 1);
376 
377  uvdxy = (mx & 1) | ((my & 1) << 1);
378  mx >>= 1;
379  my >>= 1;
380 
381  uvsrc_x = s->mb_x * 8 + mx;
382  uvsrc_y = s->mb_y * (8 >> field_based) + my;
383 
384  ptr_y = ref_picture[0] + src_y * linesize + src_x;
385  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
386  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
387 
388  if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 15 , 0) ||
389  (unsigned)src_y >= FFMAX( v_edge_pos - (motion_y & 3) - h + 1, 0)) {
390  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
391  s->linesize, s->linesize,
392  17, 17 + field_based,
393  src_x, src_y * (1 << field_based),
394  s->h_edge_pos, s->v_edge_pos);
395  ptr_y = s->sc.edge_emu_buffer;
396  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
397  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
398  uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
399  if (s->workaround_bugs & FF_BUG_IEDGE)
400  vbuf -= s->uvlinesize;
401  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
402  s->uvlinesize, s->uvlinesize,
403  9, 9 + field_based,
404  uvsrc_x, uvsrc_y * (1 << field_based),
405  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
406  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
407  s->uvlinesize, s->uvlinesize,
408  9, 9 + field_based,
409  uvsrc_x, uvsrc_y * (1 << field_based),
410  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
411  ptr_cb = ubuf;
412  ptr_cr = vbuf;
413  }
414  }
415 
416  if (!field_based)
417  qpix_op[0][dxy](dest_y, ptr_y, linesize);
418  else {
419  if (bottom_field) {
420  dest_y += s->linesize;
421  dest_cb += s->uvlinesize;
422  dest_cr += s->uvlinesize;
423  }
424 
425  if (field_select) {
426  ptr_y += s->linesize;
427  ptr_cb += s->uvlinesize;
428  ptr_cr += s->uvlinesize;
429  }
430  // damn interlaced mode
431  // FIXME boundary mirroring is not exactly correct here
432  qpix_op[1][dxy](dest_y, ptr_y, linesize);
433  qpix_op[1][dxy](dest_y + 8, ptr_y + 8, linesize);
434  }
435  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
436  pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
437  pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
438  }
439 }
440 
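In qpel_motion() the chroma MV is reduced in two steps: the quarter-pel luma component is first halved (or adjusted by one of the FF_BUG_QPEL_CHROMA* workarounds), then (mx >> 1) | (mx & 1) folds the leftover odd part into a half-pel flag, so chroma still ends up with the 2-bit uvdxy half-pel index. A worked standalone example of the default path (frame-based, no workarounds):

/* Chroma derivation for a luma MV of +7 quarter-pel (= 1.75 luma px). */
#include <stdio.h>

int main(void)
{
    int motion_x = 7;
    int mx = motion_x / 2;       /* 3: roughly quarter-pel in chroma terms */
    mx = (mx >> 1) | (mx & 1);   /* 1: keep a half-pel bit instead of truncating */
    printf("uvdxy_x = %d, chroma integer offset = %d\n", mx & 1, mx >> 1);
    return 0;                    /* prints: uvdxy_x = 1, chroma integer offset = 0 */
}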
441 /**
442  * H.263 chroma 4mv motion compensation.
443  */
444 static void chroma_4mv_motion(MpegEncContext *s,
445  uint8_t *dest_cb, uint8_t *dest_cr,
446  uint8_t *const *ref_picture,
447  const op_pixels_func *pix_op,
448  int mx, int my)
449 {
450  const uint8_t *ptr;
451  int src_x, src_y, dxy, emu = 0;
452  ptrdiff_t offset;
453 
454  /* In case of 8X8, we construct a single chroma motion vector
455  * with a special rounding */
456  mx = ff_h263_round_chroma(mx);
457  my = ff_h263_round_chroma(my);
458 
459  dxy = ((my & 1) << 1) | (mx & 1);
460  mx >>= 1;
461  my >>= 1;
462 
463  src_x = s->mb_x * 8 + mx;
464  src_y = s->mb_y * 8 + my;
465  src_x = av_clip(src_x, -8, (s->width >> 1));
466  if (src_x == (s->width >> 1))
467  dxy &= ~1;
468  src_y = av_clip(src_y, -8, (s->height >> 1));
469  if (src_y == (s->height >> 1))
470  dxy &= ~2;
471 
472  offset = src_y * s->uvlinesize + src_x;
473  ptr = ref_picture[1] + offset;
474  if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - (dxy & 1) - 7, 0) ||
475  (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - (dxy >> 1) - 7, 0)) {
476  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
477  s->uvlinesize, s->uvlinesize,
478  9, 9, src_x, src_y,
479  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
480  ptr = s->sc.edge_emu_buffer;
481  emu = 1;
482  }
483  pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);
484 
485  ptr = ref_picture[2] + offset;
486  if (emu) {
487  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
488  s->uvlinesize, s->uvlinesize,
489  9, 9, src_x, src_y,
490  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
491  ptr = s->sc.edge_emu_buffer;
492  }
493  pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
494 }
495 
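chroma_4mv_motion() receives mx/my as the sum of the four 8x8 luma MVs (see apply_obmc() and apply_8x8() below); ff_h263_round_chroma() from h263.h reduces that sum to a half-pel chroma MV with the rounding bias H.263 specifies for 4MV macroblocks, and the code above then splits the result into dxy plus an integer offset. A hedged sketch of the idea behind that rounding (the table below is an illustration only; the authoritative one lives in h263.h):

/* Divide the sum of four half-pel luma MVs by 8 (four vectors, chroma at
 * half resolution) while biasing in-between values toward the half-pel
 * position rather than truncating them. */
static int round_chroma_sketch(int sum4)
{
    static const int frac_to_halfpel[16] = {
        0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2
    };
    return (sum4 >> 3) * 2 + frac_to_halfpel[sum4 & 0xf];
}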
496 static inline void prefetch_motion(MpegEncContext *s, uint8_t *const *pix, int dir)
497 {
498  /* fetch pixels for estimated MV 4 macroblocks ahead
499  * optimized for 64-byte cache lines */
500  const int shift = s->quarter_sample ? 2 : 1;
501  const int mx = (s->mv[dir][0][0] >> shift) + 16 * s->mb_x + 8;
502  const int my = (s->mv[dir][0][1] >> shift) + 16 * s->mb_y;
503  int off = mx + (my + (s->mb_x & 3) * 4) * s->linesize + 64;
504 
505  s->vdsp.prefetch(pix[0] + off, s->linesize, 4);
506  off = (mx >> 1) + ((my >> 1) + (s->mb_x & 7)) * s->uvlinesize + 64;
507  s->vdsp.prefetch(pix[1] + off, pix[2] - pix[1], 2);
508 }
509 
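prefetch_motion() uses the 16x16 MV of the given direction to estimate where the reference data for an upcoming macroblock (the comment says four ahead) will be read and hands that address to the VideoDSP prefetch hook, while the +64 and the (s->mb_x & 3) / (s->mb_x & 7) adjustments stagger the requested addresses from one macroblock to the next. A rough standalone sketch of what such a prefetch hook can look like (GCC/Clang builtin, guarded so the sketch stays portable; this is not the actual ff_prefetch implementation):

/* Touch h rows of the reference area, one cache-line hint per row. */
#include <stddef.h>
#include <stdint.h>

static void prefetch_rows(const uint8_t *p, ptrdiff_t stride, int h)
{
    for (int i = 0; i < h; i++) {
#if defined(__GNUC__) || defined(__clang__)
        __builtin_prefetch(p, 0 /* read */, 1 /* low temporal locality */);
#endif
        p += stride;
    }
}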
510 static inline void apply_obmc(MpegEncContext *s,
511  uint8_t *dest_y,
512  uint8_t *dest_cb,
513  uint8_t *dest_cr,
514  uint8_t *const *ref_picture,
515  const op_pixels_func (*pix_op)[4])
516 {
517  LOCAL_ALIGNED_8(int16_t, mv_cache, [4], [4][2]);
518  const MPVWorkPicture *cur_frame = &s->cur_pic;
519  int mb_x = s->mb_x;
520  int mb_y = s->mb_y;
521  const int xy = mb_x + mb_y * s->mb_stride;
522  const int mot_stride = s->b8_stride;
523  const int mot_xy = mb_x * 2 + mb_y * 2 * mot_stride;
524  int mx, my, i;
525 
526  av_assert2(!s->mb_skipped);
527 
528  AV_COPY32(mv_cache[1][1], cur_frame->motion_val[0][mot_xy]);
529  AV_COPY32(mv_cache[1][2], cur_frame->motion_val[0][mot_xy + 1]);
530 
531  AV_COPY32(mv_cache[2][1],
532  cur_frame->motion_val[0][mot_xy + mot_stride]);
533  AV_COPY32(mv_cache[2][2],
534  cur_frame->motion_val[0][mot_xy + mot_stride + 1]);
535 
536  AV_COPY32(mv_cache[3][1],
537  cur_frame->motion_val[0][mot_xy + mot_stride]);
538  AV_COPY32(mv_cache[3][2],
539  cur_frame->motion_val[0][mot_xy + mot_stride + 1]);
540 
541  if (mb_y == 0 || IS_INTRA(cur_frame->mb_type[xy - s->mb_stride])) {
542  AV_COPY32(mv_cache[0][1], mv_cache[1][1]);
543  AV_COPY32(mv_cache[0][2], mv_cache[1][2]);
544  } else {
545  AV_COPY32(mv_cache[0][1],
546  cur_frame->motion_val[0][mot_xy - mot_stride]);
547  AV_COPY32(mv_cache[0][2],
548  cur_frame->motion_val[0][mot_xy - mot_stride + 1]);
549  }
550 
551  if (mb_x == 0 || IS_INTRA(cur_frame->mb_type[xy - 1])) {
552  AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
553  AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
554  } else {
555  AV_COPY32(mv_cache[1][0], cur_frame->motion_val[0][mot_xy - 1]);
556  AV_COPY32(mv_cache[2][0],
557  cur_frame->motion_val[0][mot_xy - 1 + mot_stride]);
558  }
559 
560  if (mb_x + 1 >= s->mb_width || IS_INTRA(cur_frame->mb_type[xy + 1])) {
561  AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
562  AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
563  } else {
564  AV_COPY32(mv_cache[1][3], cur_frame->motion_val[0][mot_xy + 2]);
565  AV_COPY32(mv_cache[2][3],
566  cur_frame->motion_val[0][mot_xy + 2 + mot_stride]);
567  }
568 
569  mx = 0;
570  my = 0;
571  for (i = 0; i < 4; i++) {
572  const int x = (i & 1) + 1;
573  const int y = (i >> 1) + 1;
574  int16_t mv[5][2] = {
575  { mv_cache[y][x][0], mv_cache[y][x][1] },
576  { mv_cache[y - 1][x][0], mv_cache[y - 1][x][1] },
577  { mv_cache[y][x - 1][0], mv_cache[y][x - 1][1] },
578  { mv_cache[y][x + 1][0], mv_cache[y][x + 1][1] },
579  { mv_cache[y + 1][x][0], mv_cache[y + 1][x][1] }
580  };
581  // FIXME cleanup
582  obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
583  ref_picture[0],
584  mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >> 1) * 8,
585  pix_op[1],
586  mv);
587 
588  mx += mv[0][0];
589  my += mv[0][1];
590  }
591  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
592  chroma_4mv_motion(s, dest_cb, dest_cr,
593  ref_picture, pix_op[1],
594  mx, my);
595 }
596 
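apply_obmc() collects the MVs of the current macroblock's four 8x8 blocks and of its decoded neighbours into a 4x4 mv_cache: row 0 comes from the macroblock above, rows 1-2 are the current macroblock, row 3 simply repeats row 2 because the macroblock below is not available yet, and the outer columns come from the left/right macroblocks, each falling back to the nearest current-MB entry when the neighbour is intra or outside the picture. An editorial diagram of the indices the loop at the end reads (the corner entries are never used):

/*            x=0      x=1      x=2      x=3
 *  y=0        -      above    above      -
 *  y=1      left     cur00    cur01    right
 *  y=2      left     cur10    cur11    right
 *  y=3        -      (=y=2)   (=y=2)     -
 *
 * For block i, obmc_motion() gets mid = cur plus its four 4-neighbours
 * from this cache (mv[5][2] = { mid, top, left, right, bottom }).
 */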
597 static inline void apply_8x8(MpegEncContext *s,
598  uint8_t *dest_y,
599  uint8_t *dest_cb,
600  uint8_t *dest_cr,
601  int dir,
602  uint8_t *const *ref_picture,
603  const qpel_mc_func (*qpix_op)[16],
604  const op_pixels_func (*pix_op)[4])
605 {
606  int dxy, mx, my, src_x, src_y;
607  int i;
608  int mb_x = s->mb_x;
609  int mb_y = s->mb_y;
610  uint8_t *dest;
611  const uint8_t *ptr;
612 
613  mx = 0;
614  my = 0;
615  if (s->quarter_sample) {
616  for (i = 0; i < 4; i++) {
617  int motion_x = s->mv[dir][i][0];
618  int motion_y = s->mv[dir][i][1];
619 
620  dxy = ((motion_y & 3) << 2) | (motion_x & 3);
621  src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
622  src_y = mb_y * 16 + (motion_y >> 2) + (i >> 1) * 8;
623 
624  /* WARNING: do not forget half pels */
625  src_x = av_clip(src_x, -16, s->width);
626  if (src_x == s->width)
627  dxy &= ~3;
628  src_y = av_clip(src_y, -16, s->height);
629  if (src_y == s->height)
630  dxy &= ~12;
631 
632  ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
633  if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 7, 0) ||
634  (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 3) - 7, 0)) {
635  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
636  s->linesize, s->linesize,
637  9, 9,
638  src_x, src_y,
639  s->h_edge_pos,
640  s->v_edge_pos);
641  ptr = s->sc.edge_emu_buffer;
642  }
643  dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
644  qpix_op[1][dxy](dest, ptr, s->linesize);
645 
646  mx += s->mv[dir][i][0] / 2;
647  my += s->mv[dir][i][1] / 2;
648  }
649  } else {
650  for (i = 0; i < 4; i++) {
651  hpel_motion(s,
652  dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
653  ref_picture[0],
654  mb_x * 16 + (i & 1) * 8,
655  mb_y * 16 + (i >> 1) * 8,
656  pix_op[1],
657  s->mv[dir][i][0],
658  s->mv[dir][i][1]);
659 
660  mx += s->mv[dir][i][0];
661  my += s->mv[dir][i][1];
662  }
663  }
664 
665  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
666  chroma_4mv_motion(s, dest_cb, dest_cr,
667  ref_picture, pix_op[1], mx, my);
668 }
669 
670 /**
671  * motion compensation of a single macroblock
672  * @param s context
673  * @param dest_y luma destination pointer
674  * @param dest_cb chroma cb/u destination pointer
675  * @param dest_cr chroma cr/v destination pointer
676  * @param dir direction (0->forward, 1->backward)
677  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
678  * @param pix_op halfpel motion compensation function (average or put normally)
679  * @param qpix_op qpel motion compensation function (average or put normally)
680  * the motion vectors are taken from s->mv and the MV type from s->mv_type
681  */
682 static av_always_inline void mpv_motion_internal(MpegEncContext *s,
683  uint8_t *dest_y,
684  uint8_t *dest_cb,
685  uint8_t *dest_cr,
686  int dir,
687  uint8_t *const *ref_picture,
688  const op_pixels_func (*pix_op)[4],
689  const qpel_mc_func (*qpix_op)[16],
690  int is_mpeg12)
691 {
692  int i;
693  int mb_y = s->mb_y;
694 
695  if (!is_mpeg12 && s->obmc && s->pict_type != AV_PICTURE_TYPE_B) {
696  apply_obmc(s, dest_y, dest_cb, dest_cr, ref_picture, pix_op);
697  return;
698  }
699 
700  switch (s->mv_type) {
701  case MV_TYPE_16X16:
702  if (CONFIG_MPEG4_DECODER && !is_mpeg12 && s->mcsel) {
703  ff_mpeg4_mcsel_motion(s, dest_y, dest_cb, dest_cr, ref_picture);
704  } else if (!is_mpeg12 && s->quarter_sample) {
705  qpel_motion(s, dest_y, dest_cb, dest_cr,
706  0, 0, 0,
707  ref_picture, pix_op, qpix_op,
708  s->mv[dir][0][0], s->mv[dir][0][1], 16);
709  } else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
710  s->mspel && s->codec_id == AV_CODEC_ID_WMV2) {
711  ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
712  ref_picture, pix_op,
713  s->mv[dir][0][0], s->mv[dir][0][1], 16);
714  } else {
715  mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
716  ref_picture, pix_op,
717  s->mv[dir][0][0], s->mv[dir][0][1], 16, 0, mb_y);
718  }
719  break;
720  case MV_TYPE_8X8:
721  if (!is_mpeg12)
722  apply_8x8(s, dest_y, dest_cb, dest_cr,
723  dir, ref_picture, qpix_op, pix_op);
724  break;
725  case MV_TYPE_FIELD:
726  // Only MPEG-1/2 can have a picture_structure != PICT_FRAME here.
727  if (!CONFIG_SMALL)
728  av_assert2(is_mpeg12 || s->picture_structure == PICT_FRAME);
729  if ((!CONFIG_SMALL && !is_mpeg12) ||
730  s->picture_structure == PICT_FRAME) {
731  if (!is_mpeg12 && s->quarter_sample) {
732  for (i = 0; i < 2; i++)
733  qpel_motion(s, dest_y, dest_cb, dest_cr,
734  1, i, s->field_select[dir][i],
735  ref_picture, pix_op, qpix_op,
736  s->mv[dir][i][0], s->mv[dir][i][1], 8);
737  } else {
738  /* top field */
739  mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
740  0, s->field_select[dir][0],
741  ref_picture, pix_op,
742  s->mv[dir][0][0], s->mv[dir][0][1], mb_y);
743  /* bottom field */
744  mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
745  1, s->field_select[dir][1],
746  ref_picture, pix_op,
747  s->mv[dir][1][0], s->mv[dir][1][1], mb_y);
748  }
749  } else {
750  av_assert2(s->out_format == FMT_MPEG1);
751  if (s->picture_structure != s->field_select[dir][0] + 1 &&
752  s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
753  ref_picture = s->cur_pic.ptr->f->data;
754  }
755 
756  mpeg_motion(s, dest_y, dest_cb, dest_cr,
757  s->field_select[dir][0],
758  ref_picture, pix_op,
759  s->mv[dir][0][0], s->mv[dir][0][1], 16, 0, mb_y >> 1);
760  }
761  break;
762  case MV_TYPE_16X8:
763  if (CONFIG_SMALL || is_mpeg12) {
764  for (i = 0; i < 2; i++) {
765  uint8_t *const *ref2picture;
766 
767  if (s->picture_structure == s->field_select[dir][i] + 1 ||
768  s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
769  ref2picture = ref_picture;
770  } else {
771  ref2picture = s->cur_pic.ptr->f->data;
772  }
773 
774  mpeg_motion(s, dest_y, dest_cb, dest_cr,
775  s->field_select[dir][i],
776  ref2picture, pix_op,
777  s->mv[dir][i][0], s->mv[dir][i][1],
778  8, 1, (mb_y & ~1) + i);
779 
780  dest_y += 16 * s->linesize;
781  dest_cb += (16 >> s->chroma_y_shift) * s->uvlinesize;
782  dest_cr += (16 >> s->chroma_y_shift) * s->uvlinesize;
783  }
784  break;
785  }
786  case MV_TYPE_DMV:
787  if (CONFIG_SMALL || is_mpeg12) {
788  if (s->picture_structure == PICT_FRAME) {
789  for (i = 0; i < 2; i++) {
790  for (int j = 0; j < 2; j++)
791  mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
792  j, j ^ i, ref_picture, pix_op,
793  s->mv[dir][2 * i + j][0],
794  s->mv[dir][2 * i + j][1], mb_y);
795  pix_op = s->hdsp.avg_pixels_tab;
796  }
797  } else {
798  for (i = 0; i < 2; i++) {
799  mpeg_motion(s, dest_y, dest_cb, dest_cr,
800  s->picture_structure != i + 1,
801  ref_picture, pix_op,
802  s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
803  16, 0, mb_y >> 1);
804 
805  // after put we make avg of the same block
806  pix_op = s->hdsp.avg_pixels_tab;
807 
808  /* opposite parity is always in the same frame if this is
809  * second field */
810  if (!s->first_field)
811  ref_picture = s->cur_pic.ptr->f->data;
812  }
813  }
814  break;
815  }
816  default: av_assert2(0);
817  }
818 }
819 
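Throughout mpv_motion_internal() the convention is that the first prediction of a block is written with "put" pixel ops and every further prediction is blended in with "avg" ops (the MV_TYPE_DMV case switches pix_op to s->hdsp.avg_pixels_tab after the first call, and callers typically pass avg tables for the second direction of a bidirectional macroblock), so multi-reference prediction is accumulated as put followed by rounded averaging. A self-contained illustration of that put-then-avg composition on a small row of samples:

/* put + avg composition: dst = put(ref0); then dst = avg(dst, ref1),
 * i.e. (ref0 + ref1 + 1) >> 1 per sample, matching the rounded average
 * used by the hpeldsp avg_pixels functions. */
#include <stdint.h>
#include <stdio.h>

static void put_row(uint8_t *dst, const uint8_t *src, int n)
{
    for (int i = 0; i < n; i++)
        dst[i] = src[i];
}

static void avg_row(uint8_t *dst, const uint8_t *src, int n)
{
    for (int i = 0; i < n; i++)
        dst[i] = (dst[i] + src[i] + 1) >> 1;
}

int main(void)
{
    uint8_t ref0[4] = { 10, 20, 30, 40 };
    uint8_t ref1[4] = { 50, 21, 33, 40 };
    uint8_t mb[4];

    put_row(mb, ref0, 4);   /* first prediction: plain copy */
    avg_row(mb, ref1, 4);   /* second prediction: rounded average */
    for (int i = 0; i < 4; i++)
        printf("%d ", mb[i]);   /* 30 21 32 40 */
    printf("\n");
    return 0;
}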
820 void ff_mpv_motion(MpegEncContext *s,
821  uint8_t *dest_y, uint8_t *dest_cb,
822  uint8_t *dest_cr, int dir,
823  uint8_t *const *ref_picture,
824  const op_pixels_func (*pix_op)[4],
825  const qpel_mc_func (*qpix_op)[16])
826 {
827  av_assert2(s->out_format == FMT_MPEG1 ||
828  s->out_format == FMT_H263 ||
829  s->out_format == FMT_H261);
830  prefetch_motion(s, ref_picture, dir);
831 
832 #if !CONFIG_SMALL
833  if (s->out_format == FMT_MPEG1)
834  mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
835  ref_picture, pix_op, qpix_op, 1);
836  else
837 #endif
838  mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
839  ref_picture, pix_op, qpix_op, 0);
840 }