FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
mpegvideo.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "avcodec.h"
34 #include "dsputil.h"
35 #include "h264chroma.h"
36 #include "internal.h"
37 #include "mathops.h"
38 #include "mpegvideo.h"
39 #include "mjpegenc.h"
40 #include "msmpeg4.h"
41 #include "xvmc_internal.h"
42 #include "thread.h"
43 #include <limits.h>
44 
46  int16_t *block, int n, int qscale);
48  int16_t *block, int n, int qscale);
50  int16_t *block, int n, int qscale);
52  int16_t *block, int n, int qscale);
54  int16_t *block, int n, int qscale);
56  int16_t *block, int n, int qscale);
58  int16_t *block, int n, int qscale);
59 
61 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
62  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
63  16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
64 };
65 
67 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
68  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
69  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
70  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
71  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
72  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
73  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 };
77 
/* Per-qscale DC coefficient scale: constant 4 for every qscale value.
 * NOTE(review): presumably the MPEG-2 table for intra_dc_precision == 1
 * (8 >> precision); confirm against ff_mpeg2_dc_scale_table users. */
static const uint8_t mpeg2_dc_scale_table1[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};
89 
/* Per-qscale DC coefficient scale: constant 2 for every qscale value.
 * NOTE(review): presumably the MPEG-2 table for intra_dc_precision == 2;
 * confirm against ff_mpeg2_dc_scale_table users. */
static const uint8_t mpeg2_dc_scale_table2[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};
101 
/* Per-qscale DC coefficient scale: constant 1 (no scaling) for every qscale.
 * NOTE(review): presumably the MPEG-2 table for intra_dc_precision == 3;
 * confirm against ff_mpeg2_dc_scale_table users. */
static const uint8_t mpeg2_dc_scale_table3[128] = {
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};
113 
114 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
119 };
120 
124 };
125 
/* Error-resilience callback (installed as er->decode_mb elsewhere):
 * re-loads the macroblock state recorded by the concealment pass into the
 * context and decodes/reconstructs that single macroblock. */
static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                              int (*mv)[2][4][2],
                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
{
    MpegEncContext *s = opaque;

    /* Restore per-MB decoding state from the callback arguments. */
    s->mv_dir     = mv_dir;
    s->mv_type    = mv_type;
    s->mb_intra   = mb_intra;
    s->mb_skipped = mb_skipped;
    s->mb_x       = mb_x;
    s->mb_y       = mb_y;
    memcpy(s->mv, mv, sizeof(*mv));

    /* NOTE(review): two source lines were lost from this extraction here
     * (likely block-index bookkeeping) — restore from upstream mpegvideo.c. */

    s->dsp.clear_blocks(s->block[0]);

    /* Destination pointers into the current picture; chroma planes are
     * shifted by the chroma subsampling factors. */
    s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
    s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
    s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);

    /* Concealment is only performed against reference 0. */
    assert(ref == 0);
    ff_MPV_decode_mb(s, s->block);
}
152 
/* init common dct for both encoder and decoder */
/* NOTE(review): the signature line was lost from this extraction; the call
 * site in ff_MPV_common_init uses it as ff_dct_common_init(s) — confirm the
 * exact prototype against upstream mpegvideo.c. Several statements inside
 * (videodsp/unquantize setup, per-arch init, scantable init) are also
 * missing; the gaps are flagged below. */
{
    ff_dsputil_init(&s->dsp, s->avctx);
    ff_h264chroma_init(&s->h264chroma, 8); //for lowres
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
    /* NOTE(review): line(s) lost from this extraction here. */

    if (s->flags & CODEC_FLAG_BITEXACT)
    /* NOTE(review): the statement controlled by this if was lost. */

#if ARCH_X86
    /* NOTE(review): per-architecture init call lost. */
#elif ARCH_ALPHA
#elif ARCH_ARM
#elif ARCH_BFIN
#elif ARCH_PPC
#endif

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        /* NOTE(review): scantable init calls lost from this extraction. */
    } else {
        /* NOTE(review): scantable init calls lost from this extraction. */
    }
    /* NOTE(review): line(s) lost from this extraction here. */

    return 0;
}
197 
/* NOTE(review): the signature line was lost from this extraction; call
 * sites use ff_mpv_frame_size_alloc(s, linesize) — confirm the prototype
 * against upstream mpegvideo.c. Allocates the linesize-dependent scratch
 * buffers (edge emulation + motion-estimation scratchpads); returns 0 or
 * AVERROR(ENOMEM). */
{
    /* Round the widest possible row up to a 32-byte-aligned size. */
    int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);

    // edge emu needs blocksize + filter length - 1
    // (= 17x17 for halfpel / 21x21 for h264)
    // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
    // at uvlinesize. It supports only YUV420 so 24x24 is enough
    // linesize * interlaced * MBsize
    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
                      fail);

    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
                      fail)
    /* All scratchpads alias one allocation; obmc starts 16 bytes in. */
    s->me.temp         = s->me.scratchpad;
    s->rd_scratchpad   = s->me.scratchpad;
    s->b_scratchpad    = s->me.scratchpad;
    s->obmc_scratchpad = s->me.scratchpad + 16;

    return 0;
fail:
    /* NOTE(review): a cleanup line was lost from this extraction here. */
    return AVERROR(ENOMEM);
}
222 
/**
 * Allocate a frame buffer
 */
/* NOTE(review): the signature line was lost from this extraction; the call
 * site uses alloc_frame_buffer(s, pic) — confirm against upstream. Gets the
 * pixel buffer (via the frame-threading API or the default allocator for WM
 * image codecs), validates strides, and allocates scratch buffers. */
{
    int r, ret;

    pic->tf.f = &pic->f;
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        /* NOTE(review): additional codec-id checks lost from this extraction. */
        r = ff_thread_get_buffer(s->avctx, &pic->tf,
                                 pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
    else {
        /* WM image codecs bypass user get_buffer callbacks. */
        pic->f.width  = s->avctx->width;
        pic->f.height = s->avctx->height;
        pic->f.format = s->avctx->pix_fmt;
        r = avcodec_default_get_buffer2(s->avctx, &pic->f, 0);
    }

    if (r < 0 || !pic->f.data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %p)\n",
               r, pic->f.data[0]);
        return -1;
    }

    if (s->avctx->hwaccel) {
        assert(!pic->hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            /* NOTE(review): the hwaccel buffer allocation line was lost. */
            if (!pic->hwaccel_priv_buf) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
            /* NOTE(review): line lost from this extraction here. */
        }
    }

    /* A stride change after init would invalidate derived pointers. */
    if (s->linesize && (s->linesize != pic->f.linesize[0] ||
                        s->uvlinesize != pic->f.linesize[1])) {
        /* NOTE(review): the av_log() call head was lost from this extraction. */
               "get_buffer() failed (stride changed)\n");
        ff_mpeg_unref_picture(s, pic);
        return -1;
    }

    if (pic->f.linesize[1] != pic->f.linesize[2]) {
        /* NOTE(review): the av_log() call head was lost from this extraction. */
               "get_buffer() failed (uv stride mismatch)\n");
        ff_mpeg_unref_picture(s, pic);
        return -1;
    }

    if (!s->edge_emu_buffer &&
        (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
        /* NOTE(review): the av_log() call head was lost from this extraction. */
               "get_buffer() failed to allocate context scratch buffers.\n");
        ff_mpeg_unref_picture(s, pic);
        return ret;
    }

    return 0;
}
286 
/* Release the per-macroblock side tables of a Picture and reset the
 * dimensions they were allocated for. */
static void free_picture_tables(Picture *pic)
{
    int i;

    /* Zero the recorded geometry so ff_alloc_picture reallocates. */
    pic->alloc_mb_width  =
    pic->alloc_mb_height = 0;

    /* NOTE(review): several buffer-unref lines were lost from this
     * extraction here — restore from upstream mpegvideo.c. */

    for (i = 0; i < 2; i++) {
        /* NOTE(review): one unref line lost from this extraction here. */
        av_buffer_unref(&pic->ref_index_buf[i]);
    }
}
306 
/* NOTE(review): the signature line was lost from this extraction; the call
 * site uses alloc_picture_tables(s, pic) — confirm against upstream.
 * Allocates the per-MB side tables of a Picture sized for the current
 * geometry; returns 0 or AVERROR(ENOMEM). */
{
    /* +1 stride/row of padding around the MB grid for edge guards. */
    const int big_mb_num    = s->mb_stride * (s->mb_height + 1) + 1;
    const int mb_array_size = s->mb_stride * s->mb_height;
    const int b8_array_size = s->b8_stride * s->mb_height * 2;
    int i;


    pic->mbskip_table_buf = av_buffer_allocz(mb_array_size + 2);
    pic->qscale_table_buf = av_buffer_allocz(big_mb_num + s->mb_stride);
    pic->mb_type_buf      = av_buffer_allocz((big_mb_num + s->mb_stride) *
                                             sizeof(uint32_t));
    if (!pic->mbskip_table_buf || !pic->qscale_table_buf || !pic->mb_type_buf)
        return AVERROR(ENOMEM);

    /* Variance/mean statistics are only needed by the encoder. */
    if (s->encoding) {
        pic->mb_var_buf    = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mc_mb_var_buf = av_buffer_allocz(mb_array_size * sizeof(int16_t));
        pic->mb_mean_buf   = av_buffer_allocz(mb_array_size);
        if (!pic->mb_var_buf || !pic->mc_mb_var_buf || !pic->mb_mean_buf)
            return AVERROR(ENOMEM);
    }

    if (s->out_format == FMT_H263 || s->encoding ||
        (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
        int mv_size        = 2 * (b8_array_size + 4) * sizeof(int16_t);
        int ref_index_size = 4 * mb_array_size;

        for (i = 0; mv_size && i < 2; i++) {
            pic->motion_val_buf[i] = av_buffer_allocz(mv_size);
            pic->ref_index_buf[i]  = av_buffer_allocz(ref_index_size);
            if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
                return AVERROR(ENOMEM);
        }
    }

    /* Record the geometry so a later size change triggers reallocation. */
    pic->alloc_mb_width  = s->mb_width;
    pic->alloc_mb_height = s->mb_height;

    return 0;
}
348 
/* NOTE(review): the signature line was lost from this extraction; the call
 * site uses make_tables_writable(pic) — confirm against upstream.
 * Ensures every (possibly shared) side-table buffer of the Picture is
 * writable, copying on demand; returns 0 or a negative AVERROR. */
{
    int ret, i;
#define MAKE_WRITABLE(table) \
do {\
    if (pic->table &&\
        (ret = av_buffer_make_writable(&pic->table)) < 0)\
        return ret;\
} while (0)

    MAKE_WRITABLE(mb_var_buf);
    MAKE_WRITABLE(mc_mb_var_buf);
    MAKE_WRITABLE(mb_mean_buf);
    MAKE_WRITABLE(mbskip_table_buf);
    MAKE_WRITABLE(qscale_table_buf);
    MAKE_WRITABLE(mb_type_buf);

    for (i = 0; i < 2; i++) {
        MAKE_WRITABLE(motion_val_buf[i]);
        MAKE_WRITABLE(ref_index_buf[i]);
    }

    return 0;
}
373 
374 /**
375  * Allocate a Picture.
376  * The pixels are allocated/set by calling get_buffer() if shared = 0
377  */
378 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
379 {
380  int i, ret;
381 
382  if (pic->qscale_table_buf)
383  if ( pic->alloc_mb_width != s->mb_width
384  || pic->alloc_mb_height != s->mb_height)
385  free_picture_tables(pic);
386 
387  if (shared) {
388  assert(pic->f.data[0]);
389  pic->shared = 1;
390  } else {
391  assert(!pic->f.data[0]);
392 
393  if (alloc_frame_buffer(s, pic) < 0)
394  return -1;
395 
396  s->linesize = pic->f.linesize[0];
397  s->uvlinesize = pic->f.linesize[1];
398  }
399 
400  if (!pic->qscale_table_buf)
401  ret = alloc_picture_tables(s, pic);
402  else
403  ret = make_tables_writable(pic);
404  if (ret < 0)
405  goto fail;
406 
407  if (s->encoding) {
408  pic->mb_var = (uint16_t*)pic->mb_var_buf->data;
409  pic->mc_mb_var = (uint16_t*)pic->mc_mb_var_buf->data;
410  pic->mb_mean = pic->mb_mean_buf->data;
411  }
412 
413  pic->mbskip_table = pic->mbskip_table_buf->data;
414  pic->qscale_table = pic->qscale_table_buf->data + 2 * s->mb_stride + 1;
415  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
416 
417  if (pic->motion_val_buf[0]) {
418  for (i = 0; i < 2; i++) {
419  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
420  pic->ref_index[i] = pic->ref_index_buf[i]->data;
421  }
422  }
423 
424  return 0;
425 fail:
426  av_log(s->avctx, AV_LOG_ERROR, "Error allocating a picture.\n");
427  ff_mpeg_unref_picture(s, pic);
428  free_picture_tables(pic);
429  return AVERROR(ENOMEM);
430 }
431 
/**
 * Deallocate a picture.
 */
/* NOTE(review): the signature line was lost from this extraction; other
 * call sites in this file use ff_mpeg_unref_picture(s, pic) — confirm this
 * is that definition against upstream. Releases the frame buffer and wipes
 * every field after mb_mean, preserving the side-table pointers above it. */
{
    /* Offset of the first field to be zeroed (everything past mb_mean). */
    int off = offsetof(Picture, mb_mean) + sizeof(pic->mb_mean);

    pic->tf.f = &pic->f;
    /* WM Image / Screen codecs allocate internal buffers with different
     * dimensions / colorspaces; ignore user-defined callbacks for these. */
    if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
        /* NOTE(review): additional codec-id checks lost from this extraction. */
        ff_thread_release_buffer(s->avctx, &pic->tf);
    else
        av_frame_unref(&pic->f);

    /* NOTE(review): line lost from this extraction here (likely hwaccel
     * buffer cleanup) — restore from upstream mpegvideo.c. */

    if (pic->needs_realloc)
        free_picture_tables(pic);

    memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
}
456 
/* NOTE(review): the signature line was lost from this extraction; the call
 * sites use update_picture_tables(dst, src) — confirm against upstream.
 * Makes dst share src's side-table buffers (re-referencing only the ones
 * that actually differ) and copies the derived pointers; returns 0 or
 * AVERROR(ENOMEM). */
{
    int i;

#define UPDATE_TABLE(table)\
do {\
    if (src->table &&\
        (!dst->table || dst->table->buffer != src->table->buffer)) {\
        av_buffer_unref(&dst->table);\
        dst->table = av_buffer_ref(src->table);\
        if (!dst->table) {\
            free_picture_tables(dst);\
            return AVERROR(ENOMEM);\
        }\
    }\
} while (0)

    UPDATE_TABLE(mb_var_buf);
    UPDATE_TABLE(mc_mb_var_buf);
    UPDATE_TABLE(mb_mean_buf);
    UPDATE_TABLE(mbskip_table_buf);
    UPDATE_TABLE(qscale_table_buf);
    UPDATE_TABLE(mb_type_buf);
    for (i = 0; i < 2; i++) {
        UPDATE_TABLE(motion_val_buf[i]);
        UPDATE_TABLE(ref_index_buf[i]);
    }

    /* The derived data pointers mirror src's directly — they point into
     * the buffers that were just re-referenced above. */
    dst->mb_var        = src->mb_var;
    dst->mc_mb_var     = src->mc_mb_var;
    dst->mb_mean       = src->mb_mean;
    dst->mbskip_table  = src->mbskip_table;
    dst->qscale_table  = src->qscale_table;
    dst->mb_type       = src->mb_type;
    for (i = 0; i < 2; i++) {
        dst->motion_val[i] = src->motion_val[i];
        dst->ref_index[i]  = src->ref_index[i];
    }

    dst->alloc_mb_width  = src->alloc_mb_width;
    dst->alloc_mb_height = src->alloc_mb_height;

    return 0;
}
501 
/* NOTE(review): the signature line was lost from this extraction; call
 * sites use ff_mpeg_ref_picture(s, dst, src) — confirm against upstream.
 * Creates dst as a new reference to src: frame buffer, side tables,
 * hwaccel data and the scalar bookkeeping fields. */
{
    int ret;

    av_assert0(!dst->f.buf[0]);
    av_assert0(src->f.buf[0]);

    src->tf.f = &src->f;
    dst->tf.f = &dst->f;
    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
    if (ret < 0)
        goto fail;

    ret = update_picture_tables(dst, src);
    if (ret < 0)
        goto fail;

    if (src->hwaccel_picture_private) {
        /* NOTE(review): the hwaccel buffer ref line was lost from this
         * extraction here. */
        if (!dst->hwaccel_priv_buf)
            goto fail;
        /* NOTE(review): line lost from this extraction here. */
    }

    dst->field_picture = src->field_picture;
    dst->mb_var_sum    = src->mb_var_sum;
    dst->mc_mb_var_sum = src->mc_mb_var_sum;
    dst->b_frame_score = src->b_frame_score;
    dst->needs_realloc = src->needs_realloc;
    dst->reference     = src->reference;
    dst->shared        = src->shared;

    return 0;
fail:
    ff_mpeg_unref_picture(s, dst);
    return ret;
}
539 
/* NOTE(review): the signature line was lost from this extraction; call
 * sites use init_duplicate_context(s) — confirm against upstream.
 * Allocates the per-slice-thread scratch state (ME maps, DCT blocks and,
 * for H.263-family, the ac prediction values). */
{
    int y_size  = s->b8_stride * (2 * s->mb_height + 1);
    int c_size  = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    /* Linesize-dependent buffers are allocated lazily later, once the
     * stride is known (ff_mpv_frame_size_alloc). */
    s->edge_emu_buffer =
    s->me.scratchpad   =
    s->me.temp         =
    s->rd_scratchpad   =
    s->b_scratchpad    =
    s->obmc_scratchpad = NULL;

    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        /* NOTE(review): the allocation head for the second ME map was lost
         * from this extraction here. */
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->avctx->noise_reduction) {
            /* NOTE(review): allocation head lost from this extraction here. */
                              2 * 64 * sizeof(int), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        /* ac values */
        /* NOTE(review): the allocation head was lost from this extraction here. */
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; // free() through ff_MPV_common_end()
}
584 
/* NOTE(review): the signature line was lost from this extraction
 * (counterpart to init_duplicate_context — confirm name against upstream).
 * Frees the per-slice-thread scratch buffers; safe to call with NULL. */
{
    if (s == NULL)
        return;

    /* NOTE(review): one free line was lost from this extraction here
     * (the edge emulation buffer, by elimination). */
    av_freep(&s->me.scratchpad);
    /* The remaining scratchpads alias me.scratchpad (see the size alloc
     * routine), so they are only cleared, not freed. */
    s->me.temp =
    s->rd_scratchpad =
    s->b_scratchpad =
    s->obmc_scratchpad = NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    av_freep(&s->ac_val_base);
    s->block = NULL;
}
604 
/* NOTE(review): the signature line was lost from this extraction; call
 * sites use backup_duplicate_context(&bak, dst) — confirm against upstream.
 * Copies the per-thread fields (scratch pointers, slice range, bit writer,
 * DCT statistics) from src into bak so they survive a whole-struct memcpy
 * in ff_update_duplicate_context. */
{
#define COPY(a) bak->a = src->a
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
631 
/* NOTE(review): the signature line was lost from this extraction (the timer
 * tag below names this ff_update_duplicate_context(dst, src) — confirm).
 * Overwrites dst with src while preserving dst's own per-thread buffers,
 * then fixes up the block pointers and lazily allocates scratch buffers. */
{
    MpegEncContext bak;
    int i, ret;
    // FIXME copy only needed parts
    // START_TIMER
    /* Save dst's thread-local pointers, clone src wholesale, restore. */
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    /* pblocks must point into dst's own block array, not src's. */
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    if (!dst->edge_emu_buffer &&
        (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    // STOP_TIMER("update_duplicate_context")
    // about 10k cycles / 0.01 sec for  1000frames on 1ghz with 2 threads
    return 0;
}
654 
/* NOTE(review): the first line of the signature was lost from this
 * extraction; by convention this is the frame-threading update callback
 * (dst, src are the two AVCodecContexts) — confirm the name against
 * upstream mpegvideo.c. Synchronizes the destination thread's context with
 * the source thread's after a frame is decoded. */
                                 const AVCodecContext *src)
{
    int i, ret;
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if (dst == src)
        return 0;

    av_assert0(s != s1);

    // FIXME can parameters change on I-frames?
    // in that case dst may need a reinit
    if (!s->context_initialized) {
        memcpy(s, s1, sizeof(MpegEncContext));

        /* The wholesale memcpy copied s1's pointers; re-own them. */
        s->avctx = dst;
        s->bitstream_buffer = NULL;
        /* NOTE(review): line lost from this extraction here. */

        if (s1->context_initialized){
//             s->picture_range_start  += MAX_PICTURE_COUNT;
//             s->picture_range_end    += MAX_PICTURE_COUNT;
            if((ret = ff_MPV_common_init(s)) < 0){
                memset(s, 0, sizeof(MpegEncContext));
                s->avctx = dst;
                return ret;
            }
        }
    }

    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        s->context_reinit = 0;
        s->height = s1->height;
        s->width  = s1->width;
        if ((ret = ff_MPV_common_frame_size_change(s)) < 0)
            return ret;
    }

    s->avctx->coded_height  = s1->avctx->coded_height;
    s->avctx->coded_width   = s1->avctx->coded_width;
    s->avctx->width         = s1->avctx->width;
    s->avctx->height        = s1->avctx->height;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;
    s->input_picture_number = s1->input_picture_number;

    av_assert0(!s->picture || s->picture != s1->picture);
    if(s->picture)
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        ff_mpeg_unref_picture(s, &s->picture[i]);
        if (s1->picture[i].f.data[0] &&
            (ret = ff_mpeg_ref_picture(s, &s->picture[i], &s1->picture[i])) < 0)
            return ret;
    }

/* Re-reference a picture from s1; pictures without pixel data still get
 * their side tables shared. */
#define UPDATE_PICTURE(pic)\
do {\
    ff_mpeg_unref_picture(s, &s->pic);\
    if (s1->pic.f.data[0])\
        ret = ff_mpeg_ref_picture(s, &s->pic, &s1->pic);\
    else\
        ret = update_picture_tables(&s->pic, &s1->pic);\
    if (ret < 0)\
        return ret;\
} while (0)

    UPDATE_PICTURE(current_picture);
    UPDATE_PICTURE(last_picture);
    UPDATE_PICTURE(next_picture);

    /* Translate s1's picture pointers into s's picture array. */
    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    // Error/bug resilience
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs      = s1->workaround_bugs;
    s->padding_bug_score    = s1->padding_bug_score;

    // MPEG4 timing info
    memcpy(&s->time_increment_bits, &s1->time_increment_bits,
           (char *) &s1->shape - (char *) &s1->time_increment_bits);

    // B-frame info
    s->max_b_frames = s1->max_b_frames;
    s->low_delay    = s1->low_delay;
    s->droppable    = s1->droppable;

    // DivX handling (doesn't work)
    s->divx_packed  = s1->divx_packed;

    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
            /* NOTE(review): the buffer (re)allocation lines were lost from
             * this extraction here. */
                                      s1->allocated_bitstream_buffer_size);
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
               /* NOTE(review): the padding-size argument line was lost. */
    }

    // linesize dependend scratch buffer allocation
    if (!s->edge_emu_buffer)
        if (s1->linesize) {
            if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
                       "scratch buffers.\n");
                return AVERROR(ENOMEM);
            }
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
                   "be allocated due to unknown size.\n");
        }

    // MPEG2/interlacing info
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

    if (!s1->first_field) {
        s->last_pict_type = s1->pict_type;
        if (s1->current_picture_ptr)
            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;

        if (s1->pict_type != AV_PICTURE_TYPE_B) {
            s->last_non_b_pict_type = s1->pict_type;
        }
    }

    return 0;
}
790 
/**
 * Set the given MpegEncContext to common defaults
 * (same for encoding and decoding).
 * The changed fields will not depend upon the
 * prior state of the MpegEncContext.
 */
/* NOTE(review): the signature line was lost from this extraction. */
{
    s->y_dc_scale_table =
    /* NOTE(review): line(s) lost from this extraction here (the remaining
     * dc scale table assignments). */
    s->progressive_frame    = 1;
    s->progressive_sequence = 1;
    /* NOTE(review): line lost from this extraction here. */

    s->coded_picture_number  = 0;
    s->picture_number        = 0;
    s->input_picture_number  = 0;

    s->picture_in_gop_number = 0;

    s->f_code = 1;
    s->b_code = 1;

    s->slice_context_count = 1;
}
817 
/**
 * Set the given MpegEncContext to defaults for decoding.
 * the changed fields will not depend upon
 * the prior state of the MpegEncContext.
 */
/* NOTE(review): the signature line and the single body statement were lost
 * from this extraction — restore from upstream mpegvideo.c. */
{
    /* NOTE(review): body statement lost here. */
}
827 
/* NOTE(review): the signature line was lost from this extraction; the call
 * site uses init_er(s) — confirm against upstream. Wires the error
 * resilience context to this decoder's geometry and tables and allocates
 * its status buffers; returns 0 or AVERROR(ENOMEM). */
{
    ERContext *er = &s->er;
    int mb_array_size = s->mb_height * s->mb_stride;
    int i;

    er->avctx       = s->avctx;
    er->dsp         = &s->dsp;

    /* Share the decoder's macroblock geometry with the ER code. */
    er->mb_index2xy = s->mb_index2xy;
    er->mb_num      = s->mb_num;
    er->mb_width    = s->mb_width;
    er->mb_height   = s->mb_height;
    er->mb_stride   = s->mb_stride;
    er->b8_stride   = s->b8_stride;

    /* NOTE(review): the er_temp_buffer allocation line was lost from this
     * extraction here (it is checked just below). */
    er->error_status_table = av_mallocz(mb_array_size);
    if (!er->er_temp_buffer || !er->error_status_table)
        goto fail;

    er->mbskip_table  = s->mbskip_table;
    er->mbintra_table = s->mbintra_table;

    for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
        er->dc_val[i] = s->dc_val[i];

    /* NOTE(review): line lost from this extraction here (likely installing
     * mpeg_er_decode_mb as the decode callback). */
    er->opaque = s;

    return 0;
fail:
    av_freep(&er->er_temp_buffer);
    /* NOTE(review): one cleanup line was lost from this extraction here. */
    return AVERROR(ENOMEM);
}
864 
/**
 * Initialize and allocates MpegEncContext fields dependent on the resolution.
 */
/* NOTE(review): the signature line was lost from this extraction; call
 * sites use init_context_frame(s) — confirm against upstream. */
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    /* Derive the macroblock grid; strides carry one extra column of
     * padding. */
    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    s->b4_stride  = s->mb_width * 4 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overriden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num     = s->mb_width * s->mb_height;

    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,            mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,       mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,       mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,     mv_table_size * 2 * sizeof(int16_t), fail)
        s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
        /* NOTE(review): the remaining mv-table pointer fixups were lost
         * from this extraction here. */

        /* Allocate MB type table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)

        /* NOTE(review): allocation head lost from this extraction here. */
                          mb_array_size * sizeof(float), fail);
        /* NOTE(review): allocation head lost from this extraction here. */
                          mb_array_size * sizeof(float), fail);

    }

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        /* NOTE(review): the rest of this condition was lost from this
         * extraction. */
        /* interlaced direct mode decoding tables */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    /* NOTE(review): the macro head was lost from this
                     * extraction here. */
                                      s->b_field_mv_table_base[i][j][k],
                                      mv_table_size * 2 * sizeof(int16_t),
                                      fail);
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                   s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
        }
    }
    if (s->out_format == FMT_H263) {
        /* cbp values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        // MN: we need these for error resilience of intra-frames
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is a intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
    // Note the + 1 is for a quicker mpeg4 slice_end detection

    return init_er(s);
fail:
    return AVERROR(ENOMEM);
}
985 
/**
 * init common structure for both encoder and decoder.
 * this assumes that some variables like width/height are already set
 */
/* NOTE(review): the signature line was lost from this extraction; the call
 * above uses ff_MPV_common_init(s) — confirm against upstream. Several
 * statements inside are also missing; the gaps are flagged below. */
{
    int i;
    int nb_slices = (HAVE_THREADS &&
        /* NOTE(review): the rest of this condition was lost from this
         * extraction. */
                    s->avctx->thread_count : 1;

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    /* NOTE(review): the condition line was lost from this extraction here
     * (it selects the interlaced MB height rounding below). */
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;

    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
        /* NOTE(review): the av_log() call head was lost from this extraction. */
               "decoding to AV_PIX_FMT_NONE is not supported.\n");
        return -1;
    }

    /* Clamp the slice/thread count to what the MB height can support. */
    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        int max_slices;
        if (s->mb_height)
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return -1;

    ff_dct_common_init(s);

    s->flags  = s->avctx->flags;
    s->flags2 = s->avctx->flags2;

    /* set chroma shifts */
    /* NOTE(review): line lost from this extraction here. */

    /* convert fourcc to upper case */
    /* NOTE(review): line(s) lost from this extraction here. */

    s->avctx->coded_frame = &s->current_picture.f;

    if (s->encoding) {
        if (s->msmpeg4_version) {
            /* NOTE(review): allocation head lost from this extraction here. */
                              2 * 2 * (MAX_LEVEL + 1) *
                              (MAX_RUN + 1) * 2 * sizeof(int), fail);
        }
        FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix,          64 * 32 * sizeof(int), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix,   64 * 32 * sizeof(int), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix,          64 * 32 * sizeof(int), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16,        64 * 32 * 2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16,        64 * 32 * 2 * sizeof(uint16_t), fail)
        /* NOTE(review): line(s) lost from this extraction here. */

        if (s->avctx->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
        }
    }

    /* NOTE(review): the picture-array allocation head was lost from this
     * extraction here. */
                      MAX_PICTURE_COUNT * sizeof(Picture), fail);
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        /* NOTE(review): per-picture init line lost from this extraction. */
    }
    memset(&s->next_picture,    0, sizeof(s->next_picture));
    memset(&s->last_picture,    0, sizeof(s->last_picture));
    memset(&s->current_picture, 0, sizeof(s->current_picture));
    /* NOTE(review): line(s) lost from this extraction here. */

    if (init_context_frame(s))
        goto fail;

    s->parse_context.state = -1;

    s->context_initialized = 1;
    s->thread_context[0]   = s;

//     if (s->width && s->height) {
    if (nb_slices > 1) {
        for (i = 1; i < nb_slices; i++) {
            s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
            memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
        }

        for (i = 0; i < nb_slices; i++) {
            if (init_duplicate_context(s->thread_context[i]) < 0)
                goto fail;
            /* Even split of MB rows across slice contexts, with rounding. */
            s->thread_context[i]->start_mb_y =
                (s->mb_height * (i) + nb_slices / 2) / nb_slices;
            s->thread_context[i]->end_mb_y   =
                (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
        }
    } else {
        if (init_duplicate_context(s) < 0)
            goto fail;
        s->start_mb_y = 0;
        s->end_mb_y   = s->mb_height;
    }
    s->slice_context_count = nb_slices;
//     }

    return 0;
 fail:
    ff_MPV_common_end(s);
    return -1;
}
1111 
/**
 * Frees and resets MpegEncContext fields depending on the resolution.
 * Is used during resolution changes to avoid a full reinitialization of the
 * codec.
 */
/* NOTE(review): the signature line was lost from this extraction; the call
 * below uses free_context_frame(s) — confirm against upstream. */
{
    int i, j, k;

    av_freep(&s->mb_type);
    /* NOTE(review): the frees of the six *_mv_table_base arrays were lost
     * from this extraction here (their pointers are NULLed below). */
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        }
        /* NOTE(review): one free line was lost from this extraction here. */
    }

    av_freep(&s->dc_val_base);
    /* NOTE(review): one free line was lost from this extraction here. */
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);

    /* NOTE(review): one free line was lost from this extraction here. */
    av_freep(&s->er.er_temp_buffer);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);

    av_freep(&s->cplx_tab);
    av_freep(&s->bits_tab);

    s->linesize = s->uvlinesize = 0;

    return 0;
}
1167 
1169 {
1170  int i, err = 0;
1171 
1172  if (s->slice_context_count > 1) {
1173  for (i = 0; i < s->slice_context_count; i++) {
1175  }
1176  for (i = 1; i < s->slice_context_count; i++) {
1177  av_freep(&s->thread_context[i]);
1178  }
1179  } else
1181 
1182  if ((err = free_context_frame(s)) < 0)
1183  return err;
1184 
1185  if (s->picture)
1186  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1187  s->picture[i].needs_realloc = 1;
1188  }
1189 
1190  s->last_picture_ptr =
1191  s->next_picture_ptr =
1192  s->current_picture_ptr = NULL;
1193 
1194  // init
1196  s->mb_height = (s->height + 31) / 32 * 2;
1197  else
1198  s->mb_height = (s->height + 15) / 16;
1199 
1200  if ((s->width || s->height) &&
1201  av_image_check_size(s->width, s->height, 0, s->avctx))
1202  return AVERROR_INVALIDDATA;
1203 
1204  if ((err = init_context_frame(s)))
1205  goto fail;
1206 
1207  s->thread_context[0] = s;
1208 
1209  if (s->width && s->height) {
1210  int nb_slices = s->slice_context_count;
1211  if (nb_slices > 1) {
1212  for (i = 1; i < nb_slices; i++) {
1213  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1214  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1215  }
1216 
1217  for (i = 0; i < nb_slices; i++) {
1218  if (init_duplicate_context(s->thread_context[i]) < 0)
1219  goto fail;
1220  s->thread_context[i]->start_mb_y =
1221  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1222  s->thread_context[i]->end_mb_y =
1223  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1224  }
1225  } else {
1226  err = init_duplicate_context(s);
1227  if (err < 0)
1228  goto fail;
1229  s->start_mb_y = 0;
1230  s->end_mb_y = s->mb_height;
1231  }
1232  s->slice_context_count = nb_slices;
1233  }
1234 
1235  return 0;
1236  fail:
1237  ff_MPV_common_end(s);
1238  return err;
1239 }
1240 
1241 /* init common structure for both encoder and decoder */
1243 {
1244  int i;
1245 
1246  if (s->slice_context_count > 1) {
1247  for (i = 0; i < s->slice_context_count; i++) {
1249  }
1250  for (i = 1; i < s->slice_context_count; i++) {
1251  av_freep(&s->thread_context[i]);
1252  }
1253  s->slice_context_count = 1;
1254  } else free_duplicate_context(s);
1255 
1257  s->parse_context.buffer_size = 0;
1258 
1261 
1262  av_freep(&s->avctx->stats_out);
1263  av_freep(&s->ac_stats);
1264 
1267  s->q_chroma_intra_matrix= NULL;
1268  s->q_chroma_intra_matrix16= NULL;
1269  av_freep(&s->q_intra_matrix);
1270  av_freep(&s->q_inter_matrix);
1273  av_freep(&s->input_picture);
1275  av_freep(&s->dct_offset);
1276 
1277  if (s->picture) {
1278  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1279  free_picture_tables(&s->picture[i]);
1280  ff_mpeg_unref_picture(s, &s->picture[i]);
1281  }
1282  }
1283  av_freep(&s->picture);
1292 
1293  free_context_frame(s);
1294 
1295  s->context_initialized = 0;
1296  s->last_picture_ptr =
1297  s->next_picture_ptr =
1298  s->current_picture_ptr = NULL;
1299  s->linesize = s->uvlinesize = 0;
1300 }
1301 
1303  uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1304 {
1305  int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1306  uint8_t index_run[MAX_RUN + 1];
1307  int last, run, level, start, end, i;
1308 
1309  /* If table is static, we can quit if rl->max_level[0] is not NULL */
1310  if (static_store && rl->max_level[0])
1311  return;
1312 
1313  /* compute max_level[], max_run[] and index_run[] */
1314  for (last = 0; last < 2; last++) {
1315  if (last == 0) {
1316  start = 0;
1317  end = rl->last;
1318  } else {
1319  start = rl->last;
1320  end = rl->n;
1321  }
1322 
1323  memset(max_level, 0, MAX_RUN + 1);
1324  memset(max_run, 0, MAX_LEVEL + 1);
1325  memset(index_run, rl->n, MAX_RUN + 1);
1326  for (i = start; i < end; i++) {
1327  run = rl->table_run[i];
1328  level = rl->table_level[i];
1329  if (index_run[run] == rl->n)
1330  index_run[run] = i;
1331  if (level > max_level[run])
1332  max_level[run] = level;
1333  if (run > max_run[level])
1334  max_run[level] = run;
1335  }
1336  if (static_store)
1337  rl->max_level[last] = static_store[last];
1338  else
1339  rl->max_level[last] = av_malloc(MAX_RUN + 1);
1340  memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1341  if (static_store)
1342  rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1343  else
1344  rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1345  memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1346  if (static_store)
1347  rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1348  else
1349  rl->index_run[last] = av_malloc(MAX_RUN + 1);
1350  memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1351  }
1352 }
1353 
/* NOTE(review): the signature line was dropped by the extraction; in FFmpeg
 * this body belongs to ff_init_vlc_rl(RLTable *rl) — TODO confirm.
 * Expands the RL table's generic VLC into per-qscale (run, level, len)
 * triplets in rl->rl_vlc[q], folding the dequantization multiply/add into
 * the table so the decoder avoids it per coefficient. */
1355 {
1356  int i, q;
1357 
 /* one pre-dequantized table per qscale value 0..31 */
1358  for (q = 0; q < 32; q++) {
 /* dequant as level' = level * (2*q) + ((q-1)|1); see use below */
1359  int qmul = q * 2;
1360  int qadd = (q - 1) | 1;
1361 
 /* q == 0 serves as the identity (no-dequant) table */
1362  if (q == 0) {
1363  qmul = 1;
1364  qadd = 0;
1365  }
1366  for (i = 0; i < rl->vlc.table_size; i++) {
1367  int code = rl->vlc.table[i][0];
1368  int len = rl->vlc.table[i][1];
1369  int level, run;
1370 
 /* len == 0 flags an invalid VLC entry; run=66 marks it
  * (66 is larger than any legal run value) */
1371  if (len == 0) { // illegal code
1372  run = 66;
1373  level = MAX_LEVEL;
1374  } else if (len < 0) { // more bits needed
1375  run = 0;
1376  level = code;
1377  } else {
 /* code == rl->n is the escape symbol */
1378  if (code == rl->n) { // esc
1379  run = 66;
1380  level = 0;
1381  } else {
1382  run = rl->table_run[code] + 1;
1383  level = rl->table_level[code] * qmul + qadd;
 /* codes >= rl->last are "last coefficient" entries;
  * +192 encodes that flag into the run field */
1384  if (code >= rl->last) run += 192;
1385  }
1386  }
1387  rl->rl_vlc[q][i].len = len;
1388  rl->rl_vlc[q][i].level = level;
1389  rl->rl_vlc[q][i].run = run;
1390  }
1391  }
1392 }
1393 
/* NOTE(review): signature line lost in extraction; presumably
 * ff_release_unused_pictures(MpegEncContext *s, int remove_current) —
 * the body reads a `remove_current` parameter. Unreferences every
 * non-reference picture slot, optionally sparing the current picture. */
1395 {
1396  int i;
1397 
1398  /* release non reference frames */
1399  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
 /* skip slots still held as references; keep the current picture
  * unless the caller asked for it to be dropped too */
1400  if (!s->picture[i].reference &&
1401  (remove_current || &s->picture[i] != s->current_picture_ptr)) {
1402  ff_mpeg_unref_picture(s, &s->picture[i]);
1403  }
1404  }
1405 }
1406 
1407 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1408 {
1409  if (pic == s->last_picture_ptr)
1410  return 0;
1411  if (pic->f.data[0] == NULL)
1412  return 1;
1413  if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
1414  return 1;
1415  return 0;
1416 }
1417 
/* Return the index of a free slot in s->picture[].
 * shared != 0: only a slot with no frame data (and not last_picture_ptr)
 * qualifies; otherwise pic_is_unused() decides.
 * Never returns an error: overflow of the picture buffer is a hard bug
 * (see the rationale comment below), so it aborts instead.
 * NOTE(review): the line numbered 1434 — presumably the opening of the
 * av_log(s->avctx, AV_LOG_FATAL, ...) call — is missing from this
 * extraction; only its string-argument line survives. */
1418 static int find_unused_picture(MpegEncContext *s, int shared)
1419 {
1420  int i;
1421 
1422  if (shared) {
1423  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1424  if (s->picture[i].f.data[0] == NULL && &s->picture[i] != s->last_picture_ptr)
1425  return i;
1426  }
1427  } else {
1428  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1429  if (pic_is_unused(s, &s->picture[i]))
1430  return i;
1431  }
1432  }
1433 
1435  "Internal error, picture buffer overflow\n");
1436  /* We could return -1, but the codec would crash trying to draw into a
1437  * non-existing frame anyway. This is safer than waiting for a random crash.
1438  * Also the return of this is never useful, an encoder must only allocate
1439  * as much as allowed in the specification. This has no relationship to how
1440  * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1441  * enough for such valid streams).
1442  * Plus, a decoder has to check stream validity and remove frames if too
1443  * many reference frames are around. Waiting for "OOM" is not correct at
1444  * all. Similarly, missing reference frames have to be replaced by
1445  * interpolated/MC frames, anything else is a bug in the codec ...
1446  */
1447  abort();
1448  return -1;
1449 }
1450 
/* NOTE(review): the signature (line 1451, presumably
 * ff_find_unused_picture(MpegEncContext *s, int shared)) and line 1460
 * are missing from this extraction.
 * Wrapper around find_unused_picture() that, for a valid slot flagged
 * needs_realloc, drops its tables and frame data so the caller gets a
 * clean slot to (re)allocate. */
1452 {
1453  int ret = find_unused_picture(s, shared);
1454 
1455  if (ret >= 0 && ret < MAX_PICTURE_COUNT) {
1456  if (s->picture[ret].needs_realloc) {
 /* clear the flag and free the old contents before reuse */
1457  s->picture[ret].needs_realloc = 0;
1458  free_picture_tables(&s->picture[ret]);
1459  ff_mpeg_unref_picture(s, &s->picture[ret]);
1461  }
1462  }
1463  return ret;
1464 }
1465 
/* NOTE(review): signature line lost in extraction; presumably
 * update_noise_reduction(MpegEncContext *s) — TODO confirm.
 * Refreshes the per-coefficient DCT noise-reduction offsets, separately
 * for intra and inter blocks, from the accumulated error statistics. */
1467 {
1468  int intra, i;
1469 
1470  for (intra = 0; intra < 2; intra++) {
 /* age the statistics: once enough blocks were accumulated,
  * halve both sums and the count so recent data dominates */
1471  if (s->dct_count[intra] > (1 << 16)) {
1472  for (i = 0; i < 64; i++) {
1473  s->dct_error_sum[intra][i] >>= 1;
1474  }
1475  s->dct_count[intra] >>= 1;
1476  }
1477 
 /* offset per coefficient, scaled by the user's noise_reduction
  * strength; +1 in the divisor avoids division by zero */
1478  for (i = 0; i < 64; i++) {
1479  s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1480  s->dct_count[intra] +
1481  s->dct_error_sum[intra][i] / 2) /
1482  (s->dct_error_sum[intra][i] + 1);
1483  }
1484  }
1485 }
1486 
1487 /**
1488  * generic function for encode/decode called after coding/decoding
1489  * the header and before a frame is coded/decoded.
1490  */
1492 {
1493  int i, ret;
1494  Picture *pic;
1495  s->mb_skipped = 0;
1496 
1497  if (!ff_thread_can_start_frame(avctx)) {
1498  av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1499  return -1;
1500  }
1501 
1502  /* mark & release old frames */
1503  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1505  s->last_picture_ptr->f.data[0]) {
1507  }
1508 
1509  /* release forgotten pictures */
1510  /* if (mpeg124/h263) */
1511  if (!s->encoding) {
1512  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1513  if (&s->picture[i] != s->last_picture_ptr &&
1514  &s->picture[i] != s->next_picture_ptr &&
1515  s->picture[i].reference && !s->picture[i].needs_realloc) {
1516  if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1517  av_log(avctx, AV_LOG_ERROR,
1518  "releasing zombie picture\n");
1519  ff_mpeg_unref_picture(s, &s->picture[i]);
1520  }
1521  }
1522  }
1523 
1525 
1526  if (!s->encoding) {
1528 
1529  if (s->current_picture_ptr &&
1530  s->current_picture_ptr->f.data[0] == NULL) {
1531  // we already have a unused image
1532  // (maybe it was set before reading the header)
1533  pic = s->current_picture_ptr;
1534  } else {
1535  i = ff_find_unused_picture(s, 0);
1536  if (i < 0) {
1537  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1538  return i;
1539  }
1540  pic = &s->picture[i];
1541  }
1542 
1543  pic->reference = 0;
1544  if (!s->droppable) {
1545  if (s->pict_type != AV_PICTURE_TYPE_B)
1546  pic->reference = 3;
1547  }
1548 
1550 
1551  if (ff_alloc_picture(s, pic, 0) < 0)
1552  return -1;
1553 
1554  s->current_picture_ptr = pic;
1555  // FIXME use only the vars from current_pic
1557  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1559  if (s->picture_structure != PICT_FRAME)
1562  }
1566  }
1567 
1569  // if (s->flags && CODEC_FLAG_QSCALE)
1570  // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1572 
1573  if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1574  s->current_picture_ptr)) < 0)
1575  return ret;
1576 
1577  if (s->pict_type != AV_PICTURE_TYPE_B) {
1579  if (!s->droppable)
1581  }
1582  av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1584  s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1585  s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1586  s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1587  s->pict_type, s->droppable);
1588 
1589  if ((s->last_picture_ptr == NULL ||
1590  s->last_picture_ptr->f.data[0] == NULL) &&
1591  (s->pict_type != AV_PICTURE_TYPE_I ||
1592  s->picture_structure != PICT_FRAME)) {
1593  int h_chroma_shift, v_chroma_shift;
1595  &h_chroma_shift, &v_chroma_shift);
1596  if (s->pict_type != AV_PICTURE_TYPE_I)
1597  av_log(avctx, AV_LOG_ERROR,
1598  "warning: first frame is no keyframe\n");
1599  else if (s->picture_structure != PICT_FRAME)
1600  av_log(avctx, AV_LOG_INFO,
1601  "allocate dummy last picture for field based first keyframe\n");
1602 
1603  /* Allocate a dummy frame */
1604  i = ff_find_unused_picture(s, 0);
1605  if (i < 0) {
1606  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1607  return i;
1608  }
1609  s->last_picture_ptr = &s->picture[i];
1610  s->last_picture_ptr->f.key_frame = 0;
1611  if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1612  s->last_picture_ptr = NULL;
1613  return -1;
1614  }
1615 
1616  memset(s->last_picture_ptr->f.data[0], 0x80,
1617  avctx->height * s->last_picture_ptr->f.linesize[0]);
1618  memset(s->last_picture_ptr->f.data[1], 0x80,
1619  (avctx->height >> v_chroma_shift) *
1620  s->last_picture_ptr->f.linesize[1]);
1621  memset(s->last_picture_ptr->f.data[2], 0x80,
1622  (avctx->height >> v_chroma_shift) *
1623  s->last_picture_ptr->f.linesize[2]);
1624 
1626  for(i=0; i<avctx->height; i++)
1627  memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
1628  }
1629 
1630  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
1631  ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
1632  }
1633  if ((s->next_picture_ptr == NULL ||
1634  s->next_picture_ptr->f.data[0] == NULL) &&
1635  s->pict_type == AV_PICTURE_TYPE_B) {
1636  /* Allocate a dummy frame */
1637  i = ff_find_unused_picture(s, 0);
1638  if (i < 0) {
1639  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1640  return i;
1641  }
1642  s->next_picture_ptr = &s->picture[i];
1643  s->next_picture_ptr->f.key_frame = 0;
1644  if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1645  s->next_picture_ptr = NULL;
1646  return -1;
1647  }
1648  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
1649  ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
1650  }
1651 
1652 #if 0 // BUFREF-FIXME
1653  memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
1654  memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
1655 #endif
1656  if (s->last_picture_ptr) {
1658  if (s->last_picture_ptr->f.data[0] &&
1659  (ret = ff_mpeg_ref_picture(s, &s->last_picture,
1660  s->last_picture_ptr)) < 0)
1661  return ret;
1662  }
1663  if (s->next_picture_ptr) {
1665  if (s->next_picture_ptr->f.data[0] &&
1666  (ret = ff_mpeg_ref_picture(s, &s->next_picture,
1667  s->next_picture_ptr)) < 0)
1668  return ret;
1669  }
1670 
1671  assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1672  s->last_picture_ptr->f.data[0]));
1673 
1674  if (s->picture_structure!= PICT_FRAME) {
1675  int i;
1676  for (i = 0; i < 4; i++) {
1678  s->current_picture.f.data[i] +=
1679  s->current_picture.f.linesize[i];
1680  }
1681  s->current_picture.f.linesize[i] *= 2;
1682  s->last_picture.f.linesize[i] *= 2;
1683  s->next_picture.f.linesize[i] *= 2;
1684  }
1685  }
1686 
1687  s->err_recognition = avctx->err_recognition;
1688 
1689  /* set dequantizer, we can't do it during init as
1690  * it might change for mpeg4 and we can't do it in the header
1691  * decode as init is not called for mpeg4 there yet */
1692  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1695  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1698  } else {
1701  }
1702 
1703  if (s->dct_error_sum) {
1706  }
1707 
1708  if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1709  return ff_xvmc_field_start(s, avctx);
1710 
1711  return 0;
1712 }
1713 
1714 /* generic function for encode/decode called after a
1715  * frame has been coded/decoded. */
1717 {
1718  /* redraw edges for the frame if decoding didn't complete */
1719  // just to make sure that all data is rendered.
1720  if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1721  ff_xvmc_field_end(s);
1722  } else if ((s->er.error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1723  !s->avctx->hwaccel &&
1725  s->unrestricted_mv &&
1727  !s->intra_only &&
1728  !(s->flags & CODEC_FLAG_EMU_EDGE) &&
1729  !s->avctx->lowres
1730  ) {
1732  int hshift = desc->log2_chroma_w;
1733  int vshift = desc->log2_chroma_h;
1735  s->h_edge_pos, s->v_edge_pos,
1737  EDGE_TOP | EDGE_BOTTOM);
1739  s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1740  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1741  EDGE_TOP | EDGE_BOTTOM);
1743  s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1744  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1745  EDGE_TOP | EDGE_BOTTOM);
1746  }
1747 
1748  emms_c();
1749 
1750  s->last_pict_type = s->pict_type;
1752  if (s->pict_type!= AV_PICTURE_TYPE_B) {
1754  }
1755 #if 0
1756  /* copy back current_picture variables */
1757  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1758  if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1759  s->picture[i] = s->current_picture;
1760  break;
1761  }
1762  }
1763  assert(i < MAX_PICTURE_COUNT);
1764 #endif
1765 
1766  // clear copies, to avoid confusion
1767 #if 0
1768  memset(&s->last_picture, 0, sizeof(Picture));
1769  memset(&s->next_picture, 0, sizeof(Picture));
1770  memset(&s->current_picture, 0, sizeof(Picture));
1771 #endif
1773 
1774  if (s->current_picture.reference)
1776 }
1777 
1778 /**
1779  * Draw a line from (ex, ey) -> (sx, sy).
1780  * @param w width of the image
1781  * @param h height of the image
1782  * @param stride stride/linesize of the image
1783  * @param color color of the arrow
1784  */
1785 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1786  int w, int h, int stride, int color)
1787 {
1788  int x, y, fr, f;
1789 
1790  sx = av_clip(sx, 0, w - 1);
1791  sy = av_clip(sy, 0, h - 1);
1792  ex = av_clip(ex, 0, w - 1);
1793  ey = av_clip(ey, 0, h - 1);
1794 
1795  buf[sy * stride + sx] += color;
1796 
1797  if (FFABS(ex - sx) > FFABS(ey - sy)) {
1798  if (sx > ex) {
1799  FFSWAP(int, sx, ex);
1800  FFSWAP(int, sy, ey);
1801  }
1802  buf += sx + sy * stride;
1803  ex -= sx;
1804  f = ((ey - sy) << 16) / ex;
1805  for (x = 0; x <= ex; x++) {
1806  y = (x * f) >> 16;
1807  fr = (x * f) & 0xFFFF;
1808  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1809  if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1810  }
1811  } else {
1812  if (sy > ey) {
1813  FFSWAP(int, sx, ex);
1814  FFSWAP(int, sy, ey);
1815  }
1816  buf += sx + sy * stride;
1817  ey -= sy;
1818  if (ey)
1819  f = ((ex - sx) << 16) / ey;
1820  else
1821  f = 0;
1822  for(y= 0; y <= ey; y++){
1823  x = (y*f) >> 16;
1824  fr = (y*f) & 0xFFFF;
1825  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1826  if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
1827  }
1828  }
1829 }
1830 
/**
 * Draw an arrow from (ex, ey) -> (sx, sy); the head is placed at (sx, sy).
 * @param w width of the image
 * @param h height of the image
 * @param stride stride/linesize of the image
 * @param color color of the arrow
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
                       int ey, int w, int h, int stride, int color)
{
    int vx, vy;

    /* allow endpoints a little outside the frame; draw_line clips exactly */
    sx = av_clip(sx, -100, w + 100);
    sy = av_clip(sy, -100, h + 100);
    ex = av_clip(ex, -100, w + 100);
    ey = av_clip(ey, -100, h + 100);

    vx = ex - sx;
    vy = ey - sy;

    /* only vectors longer than 3 pixels get a head */
    if (vx * vx + vy * vy > 3 * 3) {
        /* head strokes: the direction vector rotated by +/- 45 degrees */
        int rx  = vx + vy;
        int ry  = vy - vx;
        int len = ff_sqrt((rx * rx + ry * ry) << 8);

        /* FIXME subpixel accuracy */
        rx = ROUNDED_DIV(rx * 3 << 4, len);
        ry = ROUNDED_DIV(ry * 3 << 4, len);

        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
1865 
1866 /**
1867  * Print debugging info for the given picture.
1868  */
1869 void ff_print_debug_info2(AVCodecContext *avctx, Picture *p, AVFrame *pict, uint8_t *mbskip_table,
1870  int *low_delay,
1871  int mb_width, int mb_height, int mb_stride, int quarter_sample)
1872 {
1873  if (avctx->hwaccel || !p || !p->mb_type
1875  return;
1876 
1877 
1878  if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1879  int x,y;
1880 
1881  av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1883  for (y = 0; y < mb_height; y++) {
1884  for (x = 0; x < mb_width; x++) {
1885  if (avctx->debug & FF_DEBUG_SKIP) {
1886  int count = mbskip_table[x + y * mb_stride];
1887  if (count > 9)
1888  count = 9;
1889  av_log(avctx, AV_LOG_DEBUG, "%1d", count);
1890  }
1891  if (avctx->debug & FF_DEBUG_QP) {
1892  av_log(avctx, AV_LOG_DEBUG, "%2d",
1893  p->qscale_table[x + y * mb_stride]);
1894  }
1895  if (avctx->debug & FF_DEBUG_MB_TYPE) {
1896  int mb_type = p->mb_type[x + y * mb_stride];
1897  // Type & MV direction
1898  if (IS_PCM(mb_type))
1899  av_log(avctx, AV_LOG_DEBUG, "P");
1900  else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1901  av_log(avctx, AV_LOG_DEBUG, "A");
1902  else if (IS_INTRA4x4(mb_type))
1903  av_log(avctx, AV_LOG_DEBUG, "i");
1904  else if (IS_INTRA16x16(mb_type))
1905  av_log(avctx, AV_LOG_DEBUG, "I");
1906  else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1907  av_log(avctx, AV_LOG_DEBUG, "d");
1908  else if (IS_DIRECT(mb_type))
1909  av_log(avctx, AV_LOG_DEBUG, "D");
1910  else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1911  av_log(avctx, AV_LOG_DEBUG, "g");
1912  else if (IS_GMC(mb_type))
1913  av_log(avctx, AV_LOG_DEBUG, "G");
1914  else if (IS_SKIP(mb_type))
1915  av_log(avctx, AV_LOG_DEBUG, "S");
1916  else if (!USES_LIST(mb_type, 1))
1917  av_log(avctx, AV_LOG_DEBUG, ">");
1918  else if (!USES_LIST(mb_type, 0))
1919  av_log(avctx, AV_LOG_DEBUG, "<");
1920  else {
1921  av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1922  av_log(avctx, AV_LOG_DEBUG, "X");
1923  }
1924 
1925  // segmentation
1926  if (IS_8X8(mb_type))
1927  av_log(avctx, AV_LOG_DEBUG, "+");
1928  else if (IS_16X8(mb_type))
1929  av_log(avctx, AV_LOG_DEBUG, "-");
1930  else if (IS_8X16(mb_type))
1931  av_log(avctx, AV_LOG_DEBUG, "|");
1932  else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1933  av_log(avctx, AV_LOG_DEBUG, " ");
1934  else
1935  av_log(avctx, AV_LOG_DEBUG, "?");
1936 
1937 
1938  if (IS_INTERLACED(mb_type))
1939  av_log(avctx, AV_LOG_DEBUG, "=");
1940  else
1941  av_log(avctx, AV_LOG_DEBUG, " ");
1942  }
1943  }
1944  av_log(avctx, AV_LOG_DEBUG, "\n");
1945  }
1946  }
1947 
1948  if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1949  (avctx->debug_mv)) {
1950  const int shift = 1 + quarter_sample;
1951  int mb_y;
1952  uint8_t *ptr;
1953  int i;
1954  int h_chroma_shift, v_chroma_shift, block_height;
1955  const int width = avctx->width;
1956  const int height = avctx->height;
1957  const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
1958  const int mv_stride = (mb_width << mv_sample_log2) +
1959  (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
1960 
1961  *low_delay = 0; // needed to see the vectors without trashing the buffers
1962 
1963  avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1964 
1965  av_frame_make_writable(pict);
1966 
1967  pict->opaque = NULL;
1968  ptr = pict->data[0];
1969  block_height = 16 >> v_chroma_shift;
1970 
1971  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1972  int mb_x;
1973  for (mb_x = 0; mb_x < mb_width; mb_x++) {
1974  const int mb_index = mb_x + mb_y * mb_stride;
1975  if ((avctx->debug_mv) && p->motion_val[0]) {
1976  int type;
1977  for (type = 0; type < 3; type++) {
1978  int direction = 0;
1979  switch (type) {
1980  case 0:
1981  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1982  (pict->pict_type!= AV_PICTURE_TYPE_P))
1983  continue;
1984  direction = 0;
1985  break;
1986  case 1:
1987  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1988  (pict->pict_type!= AV_PICTURE_TYPE_B))
1989  continue;
1990  direction = 0;
1991  break;
1992  case 2:
1993  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1994  (pict->pict_type!= AV_PICTURE_TYPE_B))
1995  continue;
1996  direction = 1;
1997  break;
1998  }
1999  if (!USES_LIST(p->mb_type[mb_index], direction))
2000  continue;
2001 
2002  if (IS_8X8(p->mb_type[mb_index])) {
2003  int i;
2004  for (i = 0; i < 4; i++) {
2005  int sx = mb_x * 16 + 4 + 8 * (i & 1);
2006  int sy = mb_y * 16 + 4 + 8 * (i >> 1);
2007  int xy = (mb_x * 2 + (i & 1) +
2008  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2009  int mx = (p->motion_val[direction][xy][0] >> shift) + sx;
2010  int my = (p->motion_val[direction][xy][1] >> shift) + sy;
2011  draw_arrow(ptr, sx, sy, mx, my, width,
2012  height, pict->linesize[0], 100);
2013  }
2014  } else if (IS_16X8(p->mb_type[mb_index])) {
2015  int i;
2016  for (i = 0; i < 2; i++) {
2017  int sx = mb_x * 16 + 8;
2018  int sy = mb_y * 16 + 4 + 8 * i;
2019  int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
2020  int mx = (p->motion_val[direction][xy][0] >> shift);
2021  int my = (p->motion_val[direction][xy][1] >> shift);
2022 
2023  if (IS_INTERLACED(p->mb_type[mb_index]))
2024  my *= 2;
2025 
2026  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2027  height, pict->linesize[0], 100);
2028  }
2029  } else if (IS_8X16(p->mb_type[mb_index])) {
2030  int i;
2031  for (i = 0; i < 2; i++) {
2032  int sx = mb_x * 16 + 4 + 8 * i;
2033  int sy = mb_y * 16 + 8;
2034  int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
2035  int mx = p->motion_val[direction][xy][0] >> shift;
2036  int my = p->motion_val[direction][xy][1] >> shift;
2037 
2038  if (IS_INTERLACED(p->mb_type[mb_index]))
2039  my *= 2;
2040 
2041  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
2042  height, pict->linesize[0], 100);
2043  }
2044  } else {
2045  int sx= mb_x * 16 + 8;
2046  int sy= mb_y * 16 + 8;
2047  int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
2048  int mx= (p->motion_val[direction][xy][0]>>shift) + sx;
2049  int my= (p->motion_val[direction][xy][1]>>shift) + sy;
2050  draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
2051  }
2052  }
2053  }
2054  if ((avctx->debug & FF_DEBUG_VIS_QP)) {
2055  uint64_t c = (p->qscale_table[mb_index] * 128 / 31) *
2056  0x0101010101010101ULL;
2057  int y;
2058  for (y = 0; y < block_height; y++) {
2059  *(uint64_t *)(pict->data[1] + 8 * mb_x +
2060  (block_height * mb_y + y) *
2061  pict->linesize[1]) = c;
2062  *(uint64_t *)(pict->data[2] + 8 * mb_x +
2063  (block_height * mb_y + y) *
2064  pict->linesize[2]) = c;
2065  }
2066  }
2067  if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2068  p->motion_val[0]) {
2069  int mb_type = p->mb_type[mb_index];
2070  uint64_t u,v;
2071  int y;
2072 #define COLOR(theta, r) \
2073  u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2074  v = (int)(128 + r * sin(theta * 3.141592 / 180));
2075 
2076 
2077  u = v = 128;
2078  if (IS_PCM(mb_type)) {
2079  COLOR(120, 48)
2080  } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2081  IS_INTRA16x16(mb_type)) {
2082  COLOR(30, 48)
2083  } else if (IS_INTRA4x4(mb_type)) {
2084  COLOR(90, 48)
2085  } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2086  // COLOR(120, 48)
2087  } else if (IS_DIRECT(mb_type)) {
2088  COLOR(150, 48)
2089  } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2090  COLOR(170, 48)
2091  } else if (IS_GMC(mb_type)) {
2092  COLOR(190, 48)
2093  } else if (IS_SKIP(mb_type)) {
2094  // COLOR(180, 48)
2095  } else if (!USES_LIST(mb_type, 1)) {
2096  COLOR(240, 48)
2097  } else if (!USES_LIST(mb_type, 0)) {
2098  COLOR(0, 48)
2099  } else {
2100  av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2101  COLOR(300,48)
2102  }
2103 
2104  u *= 0x0101010101010101ULL;
2105  v *= 0x0101010101010101ULL;
2106  for (y = 0; y < block_height; y++) {
2107  *(uint64_t *)(pict->data[1] + 8 * mb_x +
2108  (block_height * mb_y + y) * pict->linesize[1]) = u;
2109  *(uint64_t *)(pict->data[2] + 8 * mb_x +
2110  (block_height * mb_y + y) * pict->linesize[2]) = v;
2111  }
2112 
2113  // segmentation
2114  if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2115  *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2116  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2117  *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2118  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2119  }
2120  if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2121  for (y = 0; y < 16; y++)
2122  pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2123  pict->linesize[0]] ^= 0x80;
2124  }
2125  if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2126  int dm = 1 << (mv_sample_log2 - 2);
2127  for (i = 0; i < 4; i++) {
2128  int sx = mb_x * 16 + 8 * (i & 1);
2129  int sy = mb_y * 16 + 8 * (i >> 1);
2130  int xy = (mb_x * 2 + (i & 1) +
2131  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2132  // FIXME bidir
2133  int32_t *mv = (int32_t *) &p->motion_val[0][xy];
2134  if (mv[0] != mv[dm] ||
2135  mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2136  for (y = 0; y < 8; y++)
2137  pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2138  if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2139  *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2140  pict->linesize[0]) ^= 0x8080808080808080ULL;
2141  }
2142  }
2143 
2144  if (IS_INTERLACED(mb_type) &&
2145  avctx->codec->id == AV_CODEC_ID_H264) {
2146  // hmm
2147  }
2148  }
2149  mbskip_table[mb_index] = 0;
2150  }
2151  }
2152  }
2153 }
2154 
/* NOTE(review): signature line lost in extraction; presumably
 * ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict).
 * Thin wrapper forwarding the MpegEncContext's geometry and debug state
 * to the generic ff_print_debug_info2(). */
2156 {
2157  ff_print_debug_info2(s->avctx, p, pict, s->mbskip_table, &s->low_delay,
2158  s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
2159 }
2160 
/* NOTE(review): two lines are missing from this extraction — 2161 (the
 * signature, presumably an export-qp-table helper taking MpegEncContext *s,
 * AVFrame *f and a qp_type) and 2163 (presumably the declaration of `ref`
 * as a new AVBufferRef, since `ref` is used below but never declared in the
 * visible lines — TODO confirm against the original file).
 * Skips the first 2*mb_stride+1 entries of the qscale buffer and attaches
 * the rest to the frame as its QP table. */
2162 {
 /* offset past the leading rows/column not belonging to visible MBs */
2164  int offset = 2*s->mb_stride + 1;
2165  if(!ref)
2166  return AVERROR(ENOMEM);
 /* buffer must cover one byte per MB for the full (16-aligned) height */
2167  av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
2168  ref->size -= offset;
2169  ref->data += offset;
2170  return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
2171 }
2172 
/* NOTE(review): the first signature line (2173) was lost in extraction;
 * only the parameter list survives. Presumably this is the static
 * hpel_motion_lowres() helper. Performs half-pel motion compensation for
 * one block in lowres decoding mode, using the h264 chroma MC functions
 * as subpel interpolators. Returns nonzero if edge emulation was used. */
2174  uint8_t *dest, uint8_t *src,
2175  int field_based, int field_select,
2176  int src_x, int src_y,
2177  int width, int height, int stride,
2178  int h_edge_pos, int v_edge_pos,
2179  int w, int h, h264_chroma_mc_func *pix_op,
2180  int motion_x, int motion_y)
2181 {
2182  const int lowres = s->avctx->lowres;
2183  const int op_index = FFMIN(lowres, 3);
 /* mask selecting the sub-pel fraction at this lowres level */
2184  const int s_mask = (2 << lowres) - 1;
2185  int emu = 0;
2186  int sx, sy;
2187 
 /* qpel vectors are approximated by hpel in lowres mode */
2188  if (s->quarter_sample) {
2189  motion_x /= 2;
2190  motion_y /= 2;
2191  }
2192 
 /* split the vector into integer position and sub-pel fraction;
  * note ">> lowres + 1" parses as ">> (lowres + 1)" */
2193  sx = motion_x & s_mask;
2194  sy = motion_y & s_mask;
2195  src_x += motion_x >> lowres + 1;
2196  src_y += motion_y >> lowres + 1;
2197 
2198  src += src_y * stride + src_x;
2199 
 /* if the block reaches outside the decoded area, interpolate the
  * missing edge pixels into edge_emu_buffer and read from there */
2200  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2201  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2202  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
2203  (h + 1) << field_based, src_x,
2204  src_y << field_based,
2205  h_edge_pos,
2206  v_edge_pos);
2207  src = s->edge_emu_buffer;
2208  emu = 1;
2209  }
2210 
 /* rescale the sub-pel fraction to the 1/8-pel range pix_op expects */
2211  sx = (sx << 2) >> lowres;
2212  sy = (sy << 2) >> lowres;
2213  if (field_select)
2214  src += s->linesize;
2215  pix_op[op_index](dest, src, stride, h, sx, sy);
2216  return emu;
2217 }
2218 
2219 /* apply one mpeg motion vector to the three components */
/* Lowres motion compensation for a single MV: luma plus (unless
 * CODEC_FLAG_GRAY is active) both chroma planes. The chroma sub-pel
 * position and source coordinates depend on the output format's chroma
 * subsampling (H.263, H.261, 420/422/444 cases below).
 * NOTE(review): the function's signature line (mpeg_motion_lowres) is
 * missing from this extraction. */
2221  uint8_t *dest_y,
2222  uint8_t *dest_cb,
2223  uint8_t *dest_cr,
2224  int field_based,
2225  int bottom_field,
2226  int field_select,
2227  uint8_t **ref_picture,
2228  h264_chroma_mc_func *pix_op,
2229  int motion_x, int motion_y,
2230  int h, int mb_y)
2231 {
2232  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2233  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
2234  uvsx, uvsy;
2235  const int lowres = s->avctx->lowres;
2236  const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
2237  const int block_s = 8>>lowres;
/* s_mask isolates the sub-pel fraction of a half-pel vector at this
 * lowres level (one extra fractional bit per lowres step) */
2238  const int s_mask = (2 << lowres) - 1;
2239  const int h_edge_pos = s->h_edge_pos >> lowres;
2240  const int v_edge_pos = s->v_edge_pos >> lowres;
2241  linesize = s->current_picture.f.linesize[0] << field_based;
2242  uvlinesize = s->current_picture.f.linesize[1] << field_based;
2243 
2244  // FIXME obviously not perfect but qpel will not work in lowres anyway
/* reduce quarter-pel vectors to half-pel precision */
2245  if (s->quarter_sample) {
2246  motion_x /= 2;
2247  motion_y /= 2;
2248  }
2249 
2250  if(field_based){
2251  motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2252  }
2253 
/* note: ">> lowres + 1" parses as ">> (lowres + 1)" */
2254  sx = motion_x & s_mask;
2255  sy = motion_y & s_mask;
2256  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2257  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
2258 
/* derive the chroma source position per output format / subsampling */
2259  if (s->out_format == FMT_H263) {
2260  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2261  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2262  uvsrc_x = src_x >> 1;
2263  uvsrc_y = src_y >> 1;
2264  } else if (s->out_format == FMT_H261) {
2265  // even chroma mv's are full pel in H261
2266  mx = motion_x / 4;
2267  my = motion_y / 4;
2268  uvsx = (2 * mx) & s_mask;
2269  uvsy = (2 * my) & s_mask;
2270  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2271  uvsrc_y = mb_y * block_s + (my >> lowres);
2272  } else {
2273  if(s->chroma_y_shift){
2274  mx = motion_x / 2;
2275  my = motion_y / 2;
2276  uvsx = mx & s_mask;
2277  uvsy = my & s_mask;
2278  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2279  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2280  } else {
2281  if(s->chroma_x_shift){
2282  //Chroma422
2283  mx = motion_x / 2;
2284  uvsx = mx & s_mask;
2285  uvsy = motion_y & s_mask;
2286  uvsrc_y = src_y;
2287  uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2288  } else {
2289  //Chroma444
2290  uvsx = motion_x & s_mask;
2291  uvsy = motion_y & s_mask;
2292  uvsrc_x = src_x;
2293  uvsrc_y = src_y;
2294  }
2295  }
2296  }
2297 
2298  ptr_y = ref_picture[0] + src_y * linesize + src_x;
2299  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2300  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
2301 
/* if the MC window can reach outside the padded frame, build an
 * edge-replicated copy in edge_emu_buffer and read from that instead */
2302  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2303  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2304  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2305  linesize >> field_based, 17, 17 + field_based,
2306  src_x, src_y << field_based, h_edge_pos,
2307  v_edge_pos);
2308  ptr_y = s->edge_emu_buffer;
2309  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2310  uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
2311  s->vdsp.emulated_edge_mc(uvbuf , ptr_cb, uvlinesize >> field_based, 9,
2312  9 + field_based,
2313  uvsrc_x, uvsrc_y << field_based,
2314  h_edge_pos >> 1, v_edge_pos >> 1);
2315  s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr, uvlinesize >> field_based, 9,
2316  9 + field_based,
2317  uvsrc_x, uvsrc_y << field_based,
2318  h_edge_pos >> 1, v_edge_pos >> 1);
2319  ptr_cb = uvbuf;
2320  ptr_cr = uvbuf + 16;
2321  }
2322  }
2323 
2324  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
2325  if (bottom_field) {
2326  dest_y += s->linesize;
2327  dest_cb += s->uvlinesize;
2328  dest_cr += s->uvlinesize;
2329  }
2330 
2331  if (field_select) {
2332  ptr_y += s->linesize;
2333  ptr_cb += s->uvlinesize;
2334  ptr_cr += s->uvlinesize;
2335  }
2336 
/* rescale the sub-pel fraction into the units the chroma MC functions
 * expect (the (x << 2) >> lowres pattern used throughout this file) */
2337  sx = (sx << 2) >> lowres;
2338  sy = (sy << 2) >> lowres;
2339  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2340 
2341  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
/* hc = chroma rows to process; 0 rows can occur for field MC, hence
 * the guard below */
2342  int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2343  uvsx = (uvsx << 2) >> lowres;
2344  uvsy = (uvsy << 2) >> lowres;
2345  if (hc) {
2346  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2347  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2348  }
2349  }
2350  // FIXME h261 lowres loop filter
2351 }
2352 
/* Lowres chroma motion compensation for 4MV (8x8) macroblocks: the four
 * luma vectors have already been summed by the caller into a single
 * chroma vector (mx, my), which is applied to both Cb and Cr.
 * NOTE(review): the signature line (chroma_4mv_motion_lowres) is missing
 * from this extraction. */
2354  uint8_t *dest_cb, uint8_t *dest_cr,
2355  uint8_t **ref_picture,
2356  h264_chroma_mc_func * pix_op,
2357  int mx, int my)
2358 {
2359  const int lowres = s->avctx->lowres;
2360  const int op_index = FFMIN(lowres, 3);
2361  const int block_s = 8 >> lowres;
2362  const int s_mask = (2 << lowres) - 1;
/* note: ">> lowres + 1" parses as ">> (lowres + 1)" */
2363  const int h_edge_pos = s->h_edge_pos >> lowres + 1;
2364  const int v_edge_pos = s->v_edge_pos >> lowres + 1;
2365  int emu = 0, src_x, src_y, offset, sx, sy;
2366  uint8_t *ptr;
2367 
/* reduce quarter-pel vectors to half-pel precision */
2368  if (s->quarter_sample) {
2369  mx /= 2;
2370  my /= 2;
2371  }
2372 
2373  /* In case of 8X8, we construct a single chroma motion vector
2374  with a special rounding */
2375  mx = ff_h263_round_chroma(mx);
2376  my = ff_h263_round_chroma(my);
2377 
2378  sx = mx & s_mask;
2379  sy = my & s_mask;
2380  src_x = s->mb_x * block_s + (mx >> lowres + 1);
2381  src_y = s->mb_y * block_s + (my >> lowres + 1);
2382 
2383  offset = src_y * s->uvlinesize + src_x;
2384  ptr = ref_picture[1] + offset;
2385  if (s->flags & CODEC_FLAG_EMU_EDGE) {
2386  if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
2387  (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
/* NOTE(review): line 2388 (the start of the emulated_edge_mc call for
 * the Cb plane, filling s->edge_emu_buffer) is missing from this
 * extraction; compare the explicit Cr call further below. */
2389  9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
2390  ptr = s->edge_emu_buffer;
2391  emu = 1;
2392  }
2393  }
2394  sx = (sx << 2) >> lowres;
2395  sy = (sy << 2) >> lowres;
2396  pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
2397 
/* Cr shares the same offset; redo edge emulation if Cb needed it */
2398  ptr = ref_picture[2] + offset;
2399  if (emu) {
2400  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
2401  src_x, src_y, h_edge_pos, v_edge_pos);
2402  ptr = s->edge_emu_buffer;
2403  }
2404  pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
2405 }
2406 
2407 /**
2408  * motion compensation of a single macroblock
2409  * @param s context
2410  * @param dest_y luma destination pointer
2411  * @param dest_cb chroma cb/u destination pointer
2412  * @param dest_cr chroma cr/v destination pointer
2413  * @param dir direction (0->forward, 1->backward)
2414  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2415  * @param pix_op halfpel motion compensation function (average or put normally)
2416  * the motion vectors are taken from s->mv and the MV type from s->mv_type
2417  */
2418 static inline void MPV_motion_lowres(MpegEncContext *s,
2419  uint8_t *dest_y, uint8_t *dest_cb,
2420  uint8_t *dest_cr,
2421  int dir, uint8_t **ref_picture,
2422  h264_chroma_mc_func *pix_op)
2423 {
2424  int mx, my;
2425  int mb_x, mb_y, i;
2426  const int lowres = s->avctx->lowres;
2427  const int block_s = 8 >>lowres;
2428 
2429  mb_x = s->mb_x;
2430  mb_y = s->mb_y;
2431 
/* dispatch on the MV partitioning chosen by the bitstream */
2432  switch (s->mv_type) {
2433  case MV_TYPE_16X16:
2434  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2435  0, 0, 0,
2436  ref_picture, pix_op,
2437  s->mv[dir][0][0], s->mv[dir][0][1],
2438  2 * block_s, mb_y);
2439  break;
2440  case MV_TYPE_8X8:
/* four independent luma blocks; mx/my accumulate the vector sum that
 * chroma_4mv_motion_lowres() rounds into one chroma MV */
2441  mx = 0;
2442  my = 0;
2443  for (i = 0; i < 4; i++) {
2444  hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
2445  s->linesize) * block_s,
2446  ref_picture[0], 0, 0,
2447  (2 * mb_x + (i & 1)) * block_s,
2448  (2 * mb_y + (i >> 1)) * block_s,
2449  s->width, s->height, s->linesize,
2450  s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
2451  block_s, block_s, pix_op,
2452  s->mv[dir][i][0], s->mv[dir][i][1]);
2453 
2454  mx += s->mv[dir][i][0];
2455  my += s->mv[dir][i][1];
2456  }
2457 
2458  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
2459  chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
2460  pix_op, mx, my);
2461  break;
2462  case MV_TYPE_FIELD:
2463  if (s->picture_structure == PICT_FRAME) {
2464  /* top field */
2465  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2466  1, 0, s->field_select[dir][0],
2467  ref_picture, pix_op,
2468  s->mv[dir][0][0], s->mv[dir][0][1],
2469  block_s, mb_y);
2470  /* bottom field */
2471  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2472  1, 1, s->field_select[dir][1],
2473  ref_picture, pix_op,
2474  s->mv[dir][1][0], s->mv[dir][1][1],
2475  block_s, mb_y);
2476  } else {
/* field picture referencing the opposite parity of the current frame:
 * read from the partially reconstructed current picture instead */
2477  if (s->picture_structure != s->field_select[dir][0] + 1 &&
2478  s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
2479  ref_picture = s->current_picture_ptr->f.data;
2480 
2481  }
2482  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2483  0, 0, s->field_select[dir][0],
2484  ref_picture, pix_op,
2485  s->mv[dir][0][0],
2486  s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
2487  }
2488  break;
2489  case MV_TYPE_16X8:
2490  for (i = 0; i < 2; i++) {
2491  uint8_t **ref2picture;
2492 
2493  if (s->picture_structure == s->field_select[dir][i] + 1 ||
2494  s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
2495  ref2picture = ref_picture;
2496  } else {
2497  ref2picture = s->current_picture_ptr->f.data;
2498  }
2499 
2500  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2501  0, 0, s->field_select[dir][i],
2502  ref2picture, pix_op,
2503  s->mv[dir][i][0], s->mv[dir][i][1] +
2504  2 * block_s * i, block_s, mb_y >> 1);
2505 
2506  dest_y += 2 * block_s * s->linesize;
2507  dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2508  dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
2509  }
2510  break;
2511  case MV_TYPE_DMV:
2512  if (s->picture_structure == PICT_FRAME) {
2513  for (i = 0; i < 2; i++) {
2514  int j;
2515  for (j = 0; j < 2; j++) {
2516  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2517  1, j, j ^ i,
2518  ref_picture, pix_op,
2519  s->mv[dir][2 * i + j][0],
2520  s->mv[dir][2 * i + j][1],
2521  block_s, mb_y);
2522  }
/* NOTE(review): line 2523 is missing from this extraction — presumably
 * it switches pix_op to the averaging variant for the second pass. */
2524  }
2525  } else {
2526  for (i = 0; i < 2; i++) {
2527  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
2528  0, 0, s->picture_structure != i + 1,
2529  ref_picture, pix_op,
2530  s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
2531  2 * block_s, mb_y >> 1);
2532 
2533  // after put we make avg of the same block
/* NOTE(review): line 2534 is missing from this extraction — per the
 * comment above, presumably pix_op is switched to the avg variant. */
2535 
2536  // opposite parity is always in the same
2537  // frame if this is second field
2538  if (!s->first_field) {
2539  ref_picture = s->current_picture_ptr->f.data;
2540  }
2541  }
2542  }
2543  break;
2544  default:
2545  av_assert2(0);
2546  }
2547 }
2548 
2549 /**
2550  * find the lowest MB row referenced in the MVs
2551  */
/* NOTE(review): the signature line (lowest_referenced_row, taking the
 * context and a direction index per the doc comment above) is missing
 * from this extraction. */
2553 {
2554  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
2555  int my, off, i, mvs;
2556 
/* only frame pictures with plain MV types are analyzed precisely */
2557  if (s->picture_structure != PICT_FRAME || s->mcsel)
2558  goto unhandled;
2559 
2560  switch (s->mv_type) {
2561  case MV_TYPE_16X16:
2562  mvs = 1;
2563  break;
2564  case MV_TYPE_16X8:
2565  mvs = 2;
2566  break;
2567  case MV_TYPE_8X8:
2568  mvs = 4;
2569  break;
2570  default:
2571  goto unhandled;
2572  }
2573 
/* bring every vertical component to quarter-pel units (shift half-pel
 * vectors left by one) and track the extremes */
2574  for (i = 0; i < mvs; i++) {
2575  my = s->mv[dir][i][1]<<qpel_shift;
2576  my_max = FFMAX(my_max, my);
2577  my_min = FFMIN(my_min, my);
2578  }
2579 
/* 64 quarter-pel units per 16-pixel MB row; round the reach upward */
2580  off = (FFMAX(-my_min, my_max) + 63) >> 6;
2581 
2582  return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
2583 unhandled:
/* conservative fallback: assume the whole reference frame is needed */
2584  return s->mb_height-1;
2585 }
2586 
2587 /* put block[] to dest[] */
2588 static inline void put_dct(MpegEncContext *s,
2589  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2590 {
2591  s->dct_unquantize_intra(s, block, i, qscale);
2592  s->dsp.idct_put (dest, line_size, block);
2593 }
2594 
2595 /* add block[] to dest[] */
2596 static inline void add_dct(MpegEncContext *s,
2597  int16_t *block, int i, uint8_t *dest, int line_size)
2598 {
2599  if (s->block_last_index[i] >= 0) {
2600  s->dsp.idct_add (dest, line_size, block);
2601  }
2602 }
2603 
2604 static inline void add_dequant_dct(MpegEncContext *s,
2605  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2606 {
2607  if (s->block_last_index[i] >= 0) {
2608  s->dct_unquantize_inter(s, block, i, qscale);
2609 
2610  s->dsp.idct_add (dest, line_size, block);
2611  }
2612 }
2613 
2614 /**
2615  * Clean dc, ac, coded_block for the current non-intra MB.
2616  */
/* NOTE(review): the signature line (ff_clean_intra_table_entries) is
 * missing from this extraction. */
2618 {
2619  int wrap = s->b8_stride;
2620  int xy = s->block_index[0];
2621 
/* reset the four luma DC predictors of this MB to the neutral 1024 */
2622  s->dc_val[0][xy ] =
2623  s->dc_val[0][xy + 1 ] =
2624  s->dc_val[0][xy + wrap] =
2625  s->dc_val[0][xy + 1 + wrap] = 1024;
2626  /* ac pred */
2627  memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2628  memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
/* MSMPEG4 v3+ additionally tracks per-block coded flags */
2629  if (s->msmpeg4_version>=3) {
2630  s->coded_block[xy ] =
2631  s->coded_block[xy + 1 ] =
2632  s->coded_block[xy + wrap] =
2633  s->coded_block[xy + 1 + wrap] = 0;
2634  }
2635  /* chroma */
2636  wrap = s->mb_stride;
2637  xy = s->mb_x + s->mb_y * wrap;
2638  s->dc_val[1][xy] =
2639  s->dc_val[2][xy] = 1024;
2640  /* ac pred */
2641  memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2642  memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2643 
/* mark this MB as no longer intra */
2644  s->mbintra_table[xy]= 0;
2645 }
2646 
2647 /* generic function called after a macroblock has been parsed by the
2648  decoder or after it has been encoded by the encoder.
2649 
2650  Important variables used:
2651  s->mb_intra : true if intra macroblock
2652  s->mv_dir : motion vector direction
2653  s->mv_type : motion vector type
2654  s->mv : motion vector
2655  s->interlaced_dct : true if interlaced dct used (mpeg2)
2656  */
/* Reconstruct one macroblock into s->dest (or into b_scratchpad when the
 * destination row is not directly readable): motion compensate if inter,
 * then dequantize/IDCT the residual blocks. lowres_flag and is_mpeg12
 * are constant at each call site (see ff_MPV_decode_mb below), so dead
 * branches are compiled out of this always-inlined body.
 * NOTE(review): the signature line and several call lines (2658, 2685,
 * 2712, 2741-2742, 2746-2747, 2753, 2757, 2782-2783, 2789, 2842) are
 * missing from this extraction — doxygen link lines lost in conversion. */
2657 static av_always_inline
2659  int lowres_flag, int is_mpeg12)
2660 {
2661  const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2662  if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2663  ff_xvmc_decode_mb(s);//xvmc uses pblocks
2664  return;
2665  }
2666 
2667  if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2668  /* print DCT coefficients */
2669  int i,j;
2670  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
2671  for(i=0; i<6; i++){
2672  for(j=0; j<64; j++){
2673  av_log(s->avctx, AV_LOG_DEBUG, "%5d", block[i][s->dsp.idct_permutation[j]]);
2674  }
2675  av_log(s->avctx, AV_LOG_DEBUG, "\n");
2676  }
2677  }
2678 
2679  s->current_picture.qscale_table[mb_xy] = s->qscale;
2680 
2681  /* update DC predictors for P macroblocks */
2682  if (!s->mb_intra) {
2683  if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2684  if(s->mbintra_table[mb_xy])
2686  } else {
2687  s->last_dc[0] =
2688  s->last_dc[1] =
2689  s->last_dc[2] = 128 << s->intra_dc_precision;
2690  }
2691  }
2692  else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2693  s->mbintra_table[mb_xy]=1;
2694 
2695  if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2696  uint8_t *dest_y, *dest_cb, *dest_cr;
2697  int dct_linesize, dct_offset;
2698  op_pixels_func (*op_pix)[4];
2699  qpel_mc_func (*op_qpix)[16];
2700  const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2701  const int uvlinesize = s->current_picture.f.linesize[1];
2702  const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2703  const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2704 
2705  /* avoid copy if macroblock skipped in last frame too */
2706  /* skip only during decoding as we might trash the buffers during encoding a bit */
2707  if(!s->encoding){
2708  uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2709 
2710  if (s->mb_skipped) {
2711  s->mb_skipped= 0;
2713  *mbskip_ptr = 1;
2714  } else if(!s->current_picture.reference) {
2715  *mbskip_ptr = 1;
2716  } else{
2717  *mbskip_ptr = 0; /* not skipped */
2718  }
2719  }
2720 
/* interlaced DCT doubles the line stride and offsets odd rows */
2721  dct_linesize = linesize << s->interlaced_dct;
2722  dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2723 
2724  if(readable){
2725  dest_y= s->dest[0];
2726  dest_cb= s->dest[1];
2727  dest_cr= s->dest[2];
2728  }else{
2729  dest_y = s->b_scratchpad;
2730  dest_cb= s->b_scratchpad+16*linesize;
2731  dest_cr= s->b_scratchpad+32*linesize;
2732  }
2733 
2734  if (!s->mb_intra) {
2735  /* motion handling */
2736  /* decoding or more than one mb_type (MC was already done otherwise) */
2737  if(!s->encoding){
2738 
/* frame-threaded decoding must wait for the referenced rows of the
 * reference pictures to finish decoding first */
2739  if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2740  if (s->mv_dir & MV_DIR_FORWARD) {
2743  0);
2744  }
2745  if (s->mv_dir & MV_DIR_BACKWARD) {
2748  0);
2749  }
2750  }
2751 
2752  if(lowres_flag){
/* NOTE(review): line 2753 is missing — presumably it selects the "put"
 * chroma MC table for op_pix (switched to avg after the forward pass,
 * in another missing line, 2757). */
2754 
2755  if (s->mv_dir & MV_DIR_FORWARD) {
2756  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2758  }
2759  if (s->mv_dir & MV_DIR_BACKWARD) {
2760  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
2761  }
2762  }else{
2763  op_qpix= s->me.qpel_put;
2764  if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2765  op_pix = s->hdsp.put_pixels_tab;
2766  }else{
2767  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2768  }
2769  if (s->mv_dir & MV_DIR_FORWARD) {
2770  ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
/* bidirectional MBs average the backward prediction on top */
2771  op_pix = s->hdsp.avg_pixels_tab;
2772  op_qpix= s->me.qpel_avg;
2773  }
2774  if (s->mv_dir & MV_DIR_BACKWARD) {
2775  ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2776  }
2777  }
2778  }
2779 
2780  /* skip dequant / idct if we are really late ;) */
2781  if(s->avctx->skip_idct){
2784  || s->avctx->skip_idct >= AVDISCARD_ALL)
2785  goto skip_idct;
2786  }
2787 
2788  /* add dct residue */
2790  || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2791  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2792  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2793  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2794  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2795 
2796  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2797  if (s->chroma_y_shift){
2798  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2799  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2800  }else{
2801  dct_linesize >>= 1;
2802  dct_offset >>=1;
2803  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2804  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2805  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2806  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2807  }
2808  }
2809  } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
/* residual was already dequantized: just add the IDCT */
2810  add_dct(s, block[0], 0, dest_y , dct_linesize);
2811  add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2812  add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2813  add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2814 
2815  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2816  if(s->chroma_y_shift){//Chroma420
2817  add_dct(s, block[4], 4, dest_cb, uvlinesize);
2818  add_dct(s, block[5], 5, dest_cr, uvlinesize);
2819  }else{
2820  //chroma422
2821  dct_linesize = uvlinesize << s->interlaced_dct;
2822  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2823 
2824  add_dct(s, block[4], 4, dest_cb, dct_linesize);
2825  add_dct(s, block[5], 5, dest_cr, dct_linesize);
2826  add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2827  add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2828  if(!s->chroma_x_shift){//Chroma444
2829  add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2830  add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2831  add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2832  add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2833  }
2834  }
2835  }//fi gray
2836  }
2837  else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2838  ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2839  }
2840  } else {
2841  /* dct only in intra block */
2843  put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2844  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2845  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2846  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2847 
2848  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2849  if(s->chroma_y_shift){
2850  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2851  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2852  }else{
2853  dct_offset >>=1;
2854  dct_linesize >>=1;
2855  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2856  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2857  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2858  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2859  }
2860  }
2861  }else{
/* intra block already dequantized: store the IDCT directly */
2862  s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2863  s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2864  s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2865  s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2866 
2867  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2868  if(s->chroma_y_shift){
2869  s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2870  s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2871  }else{
2872 
2873  dct_linesize = uvlinesize << s->interlaced_dct;
2874  dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*block_size;
2875 
2876  s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2877  s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2878  s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2879  s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2880  if(!s->chroma_x_shift){//Chroma444
2881  s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2882  s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2883  s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2884  s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
2885  }
2886  }
2887  }//gray
2888  }
2889  }
2890 skip_idct:
/* copy the scratchpad reconstruction back once the row is readable */
2891  if(!readable){
2892  s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2893  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2894  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2895  }
2896  }
2897 }
2898 
2899 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2900 #if !CONFIG_SMALL
2901  if(s->out_format == FMT_MPEG1) {
2902  if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2903  else MPV_decode_mb_internal(s, block, 0, 1);
2904  } else
2905 #endif
2906  if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2907  else MPV_decode_mb_internal(s, block, 0, 0);
2908 }
2909 
2910 /**
2911  * @param h is the normal height, this will be reduced automatically if needed for the last row
2912  */
/* Draw the frame edges for the newly decoded band (when needed) and
 * invoke the user's draw_horiz_band callback with per-plane offsets.
 * NOTE(review): the signature line 2913 (ff_draw_horiz_band) and a few
 * condition/declaration lines (2928, 2957, 2961) are missing from this
 * extraction. */
2914  Picture *last, int y, int h, int picture_structure,
2915  int first_field, int draw_edges, int low_delay,
2916  int v_edge_pos, int h_edge_pos)
2917 {
2918  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
2919  int hshift = desc->log2_chroma_w;
2920  int vshift = desc->log2_chroma_h;
2921  const int field_pic = picture_structure != PICT_FRAME;
/* field pictures cover every other frame line: double y/h to frame units */
2922  if(field_pic){
2923  h <<= 1;
2924  y <<= 1;
2925  }
2926 
2927  if (!avctx->hwaccel &&
2929  draw_edges &&
2930  cur->reference &&
2931  !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
2932  int *linesize = cur->f.linesize;
2933  int sides = 0, edge_h;
2934  if (y==0) sides |= EDGE_TOP;
2935  if (y + h >= v_edge_pos)
2936  sides |= EDGE_BOTTOM;
2937 
2938  edge_h= FFMIN(h, v_edge_pos - y);
2939 
/* replicate the border pixels of all three planes for this band */
2940  dsp->draw_edges(cur->f.data[0] + y * linesize[0],
2941  linesize[0], h_edge_pos, edge_h,
2942  EDGE_WIDTH, EDGE_WIDTH, sides);
2943  dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
2944  linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
2945  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2946  dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
2947  linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
2948  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
2949  }
2950 
/* clip the band height for the last row of the picture */
2951  h = FFMIN(h, avctx->height - y);
2952 
2953  if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2954 
2955  if (avctx->draw_horiz_band) {
2956  AVFrame *src;
2958  int i;
2959 
/* choose which picture to expose: the current one for B/low-delay
 * streams, otherwise the previously decoded (displayable) one */
2960  if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
2962  src = &cur->f;
2963  else if (last)
2964  src = &last->f;
2965  else
2966  return;
2967 
2968  if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
2969  picture_structure == PICT_FRAME &&
2970  avctx->codec_id != AV_CODEC_ID_SVQ3) {
2971  for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
2972  offset[i] = 0;
2973  }else{
2974  offset[0]= y * src->linesize[0];
2975  offset[1]=
2976  offset[2]= (y >> vshift) * src->linesize[1];
2977  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
2978  offset[i] = 0;
2979  }
2980 
/* leave a clean FPU/MMX state before calling back into user code */
2981  emms_c();
2982 
2983  avctx->draw_horiz_band(avctx, src, offset,
2984  y, picture_structure, h);
2985  }
2986 
2987 
/* Thin wrapper forwarding MpegEncContext state into the generic band
 * drawer above.
 * NOTE(review): the signature line (2988) and the first line of the
 * forwarded call (2991) are missing from this extraction. */
2989 {
2990  int draw_edges = s->unrestricted_mv && !s->intra_only;
2992  &s->last_picture, y, h, s->picture_structure,
2993  s->first_field, draw_edges, s->low_delay,
2994  s->v_edge_pos, s->h_edge_pos);
2995 
2996 
/* Set up s->block_index[] (8x8 block indices for the current MB) and the
 * s->dest[] plane pointers for the MB at (s->mb_x, s->mb_y). */
2997 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2998  const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2999  const int uvlinesize = s->current_picture.f.linesize[1];
3000  const int mb_size= 4 - s->avctx->lowres;
3001 
/* indices 0-3: the four luma 8x8 blocks; 4-5: chroma planes */
3002  s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
3003  s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
3004  s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
3005  s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
3006  s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3007  s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
3008  //block_index is not used by mpeg2, so it is not affected by chroma_format
3009 
3010  s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
3011  s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3012  s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
3013 
/* NOTE(review): the guarding condition on line 3014 is missing from
 * this extraction; the block below advances dest[] to the current MB
 * row (field pictures step by half the MB row index). */
3015  {
3016  if(s->picture_structure==PICT_FRAME){
3017  s->dest[0] += s->mb_y * linesize << mb_size;
3018  s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3019  s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
3020  }else{
3021  s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
3022  s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3023  s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
3025  }
3026  }
3027 }
3028 
/**
 * Permute an 8x8 block.
 * @param block the block which will be permuted according to the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to speed the permutation up
 * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
 *                  (inverse) permutated to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int16_t saved[64];
    int idx;

    /* no coded AC coefficients: nothing to move */
    if (last <= 0)
        return;

    /* lift the significant coefficients out of the block, clearing each
     * source slot so a stale value cannot survive the relocation */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        saved[pos] = block[pos];
        block[pos] = 0;
    }

    /* drop every saved coefficient at its permuted destination */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        block[permutation[pos]] = saved[pos];
    }
}
3057 
/* Flush decoder state: drop all buffered pictures and reset the parse
 * context so decoding can restart cleanly (e.g. after a seek).
 * NOTE(review): the signature line (ff_mpeg_flush(AVCodecContext*)) and
 * lines 3066-3071 / 3077 / 3079 are missing from this extraction. */
3058  int i;
3059  MpegEncContext *s = avctx->priv_data;
3060 
/* may legitimately be called before init or after close */
3061  if(s==NULL || s->picture==NULL)
3062  return;
3063 
3064  for (i = 0; i < MAX_PICTURE_COUNT; i++)
3065  ff_mpeg_unref_picture(s, &s->picture[i]);
3068 
3072 
3073  s->mb_x= s->mb_y= 0;
3074  s->closed_gop= 0;
3075 
/* reset the bitstream parser so stale partial data is discarded */
3076  s->parse_context.state= -1;
3078  s->parse_context.overread= 0;
3080  s->parse_context.index= 0;
3081  s->parse_context.last_index= 0;
3082  s->bitstream_buffer_size=0;
3083  s->pp_time=0;
3084 }
3085 
/* MPEG-1 intra dequantization (dct_unquantize_mpeg1_intra_c per the
 * forward declarations at the top of the file; the signature line is
 * missing from this extraction). DC is scaled separately; AC levels get
 * qscale * matrix / 8 with the odd-value rounding (level - 1) | 1. */
3087  int16_t *block, int n, int qscale)
3088 {
3089  int i, level, nCoeffs;
3090  const uint16_t *quant_matrix;
3091 
3092  nCoeffs= s->block_last_index[n];
3093 
/* blocks 0-3 are luma, 4+ chroma: pick the matching DC scaler */
3094  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3095  /* XXX: only mpeg1 */
3096  quant_matrix = s->intra_matrix;
3097  for(i=1;i<=nCoeffs;i++) {
3098  int j= s->intra_scantable.permutated[i];
3099  level = block[j];
3100  if (level) {
3101  if (level < 0) {
3102  level = -level;
3103  level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* force the result odd (toward zero), per MPEG-1 inverse quant */
3104  level = (level - 1) | 1;
3105  level = -level;
3106  } else {
3107  level = (int)(level * qscale * quant_matrix[j]) >> 3;
3108  level = (level - 1) | 1;
3109  }
3110  block[j] = level;
3111  }
3112  }
3113 }
3114 
/* MPEG-1 inter dequantization (dct_unquantize_mpeg1_inter_c; signature
 * line missing from this extraction). All coefficients including DC use
 * ((2*level + 1) * qscale * matrix) / 16 with odd-value rounding. */
3116  int16_t *block, int n, int qscale)
3117 {
3118  int i, level, nCoeffs;
3119  const uint16_t *quant_matrix;
3120 
3121  nCoeffs= s->block_last_index[n];
3122 
3123  quant_matrix = s->inter_matrix;
3124  for(i=0; i<=nCoeffs; i++) {
3125  int j= s->intra_scantable.permutated[i];
3126  level = block[j];
3127  if (level) {
3128  if (level < 0) {
3129  level = -level;
3130  level = (((level << 1) + 1) * qscale *
3131  ((int) (quant_matrix[j]))) >> 4;
/* force odd (toward zero), per MPEG-1 inverse quant */
3132  level = (level - 1) | 1;
3133  level = -level;
3134  } else {
3135  level = (((level << 1) + 1) * qscale *
3136  ((int) (quant_matrix[j]))) >> 4;
3137  level = (level - 1) | 1;
3138  }
3139  block[j] = level;
3140  }
3141  }
3142 }
3143 
/* MPEG-2 intra dequantization (dct_unquantize_mpeg2_intra_c; signature
 * line missing from this extraction). Like the MPEG-1 variant but
 * without the odd-value rounding. */
3145  int16_t *block, int n, int qscale)
3146 {
3147  int i, level, nCoeffs;
3148  const uint16_t *quant_matrix;
3149 
/* alternate scan order means the last coeff index is unreliable:
 * process the full block */
3150  if(s->alternate_scan) nCoeffs= 63;
3151  else nCoeffs= s->block_last_index[n];
3152 
3153  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3154  quant_matrix = s->intra_matrix;
3155  for(i=1;i<=nCoeffs;i++) {
3156  int j= s->intra_scantable.permutated[i];
3157  level = block[j];
3158  if (level) {
3159  if (level < 0) {
3160  level = -level;
3161  level = (int)(level * qscale * quant_matrix[j]) >> 3;
3162  level = -level;
3163  } else {
3164  level = (int)(level * qscale * quant_matrix[j]) >> 3;
3165  }
3166  block[j] = level;
3167  }
3168  }
3169 }
3170 
/* Bit-exact MPEG-2 intra dequantization (presumably
 * dct_unquantize_mpeg2_intra_bitexact; the signature line is missing
 * from this extraction). Identical to the plain variant above but also
 * applies MPEG-2 mismatch control: the parity of the coefficient sum is
 * folded into block[63]. */
3172  int16_t *block, int n, int qscale)
3173 {
3174  int i, level, nCoeffs;
3175  const uint16_t *quant_matrix;
/* start at -1 so that an all-even sum still toggles block[63]'s LSB */
3176  int sum=-1;
3177 
3178  if(s->alternate_scan) nCoeffs= 63;
3179  else nCoeffs= s->block_last_index[n];
3180 
3181  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3182  sum += block[0];
3183  quant_matrix = s->intra_matrix;
3184  for(i=1;i<=nCoeffs;i++) {
3185  int j= s->intra_scantable.permutated[i];
3186  level = block[j];
3187  if (level) {
3188  if (level < 0) {
3189  level = -level;
3190  level = (int)(level * qscale * quant_matrix[j]) >> 3;
3191  level = -level;
3192  } else {
3193  level = (int)(level * qscale * quant_matrix[j]) >> 3;
3194  }
3195  block[j] = level;
3196  sum+=level;
3197  }
3198  }
/* mismatch control: force the coefficient-sum parity via block[63] */
3199  block[63]^=sum&1;
3200 }
3201 
/* MPEG-2 inter dequantization (dct_unquantize_mpeg2_inter_c; signature
 * line missing from this extraction), including mismatch control on
 * block[63]. */
3203  int16_t *block, int n, int qscale)
3204 {
3205  int i, level, nCoeffs;
3206  const uint16_t *quant_matrix;
/* start at -1 so an all-even sum still toggles block[63]'s LSB */
3207  int sum=-1;
3208 
3209  if(s->alternate_scan) nCoeffs= 63;
3210  else nCoeffs= s->block_last_index[n];
3211 
3212  quant_matrix = s->inter_matrix;
3213  for(i=0; i<=nCoeffs; i++) {
3214  int j= s->intra_scantable.permutated[i];
3215  level = block[j];
3216  if (level) {
3217  if (level < 0) {
3218  level = -level;
3219  level = (((level << 1) + 1) * qscale *
3220  ((int) (quant_matrix[j]))) >> 4;
3221  level = -level;
3222  } else {
3223  level = (((level << 1) + 1) * qscale *
3224  ((int) (quant_matrix[j]))) >> 4;
3225  }
3226  block[j] = level;
3227  sum+=level;
3228  }
3229  }
/* mismatch control: force the coefficient-sum parity via block[63] */
3230  block[63]^=sum&1;
3231 }
3232 
/* H.263 intra dequantization (dct_unquantize_h263_intra_c; signature
 * line missing from this extraction): level' = level * 2*qscale +/- qadd
 * with qadd = (qscale - 1) | 1, except in AIC mode where qadd = 0. */
3234  int16_t *block, int n, int qscale)
3235 {
3236  int i, level, qmul, qadd;
3237  int nCoeffs;
3238 
3239  av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
3240 
3241  qmul = qscale << 1;
3242 
3243  if (!s->h263_aic) {
3244  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
3245  qadd = (qscale - 1) | 1;
3246  }else{
3247  qadd = 0;
3248  }
/* with AC prediction the last index is unreliable: do all 63 ACs */
3249  if(s->ac_pred)
3250  nCoeffs=63;
3251  else
3252  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3253 
3254  for(i=1; i<=nCoeffs; i++) {
3255  level = block[i];
3256  if (level) {
3257  if (level < 0) {
3258  level = level * qmul - qadd;
3259  } else {
3260  level = level * qmul + qadd;
3261  }
3262  block[i] = level;
3263  }
3264  }
3265 }
3266 
/* H.263 inter dequantization (dct_unquantize_h263_inter_c; signature
 * line missing from this extraction): all coefficients including DC use
 * level * 2*qscale +/- ((qscale - 1) | 1). */
3268  int16_t *block, int n, int qscale)
3269 {
3270  int i, level, qmul, qadd;
3271  int nCoeffs;
3272 
3273  av_assert2(s->block_last_index[n]>=0);
3274 
3275  qadd = (qscale - 1) | 1;
3276  qmul = qscale << 1;
3277 
3278  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
3279 
3280  for(i=0; i<=nCoeffs; i++) {
3281  level = block[i];
3282  if (level) {
3283  if (level < 0) {
3284  level = level * qmul - qadd;
3285  } else {
3286  level = level * qmul + qadd;
3287  }
3288  block[i] = level;
3289  }
3290  }
3291 }
3292 
3293 /**
3294  * set qscale and update qscale dependent variables.
3295  */
3296 void ff_set_qscale(MpegEncContext * s, int qscale)
3297 {
/* clamp to the legal qscale range 1..31 */
3298  if (qscale < 1)
3299  qscale = 1;
3300  else if (qscale > 31)
3301  qscale = 31;
3302 
3303  s->qscale = qscale;
3304  s->chroma_qscale= s->chroma_qscale_table[qscale];
3305 
3306  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
/* NOTE(review): line 3307 is missing from this extraction — presumably
 * the matching chroma DC-scale update. */
3308 }
3309 
3311 {
3314 }
3315 
3316 #if CONFIG_ERROR_RESILIENCE
/* Mirror the decoder state needed by the error-resilience module into
 * s->er and start error tracking for the new frame.
 * NOTE(review): the signature line (3317) and line 3328 are missing from
 * this extraction. */
3318 {
3319  ERContext *er = &s->er;
3320 
3321  er->cur_pic = s->current_picture_ptr;
3322  er->last_pic = s->last_picture_ptr;
3323  er->next_pic = s->next_picture_ptr;
3324 
3325  er->pp_time = s->pp_time;
3326  er->pb_time = s->pb_time;
3327  er->quarter_sample = s->quarter_sample;
3329 
3330  ff_er_frame_start(er);
3331 }
3332 #endif /* CONFIG_ERROR_RESILIENCE */