FFmpeg
mpegvideo_enc.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include <stdint.h>
35 
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "avcodec.h"
42 #include "dct.h"
43 #include "idctdsp.h"
44 #include "mpeg12.h"
45 #include "mpegvideo.h"
46 #include "mpegvideodata.h"
47 #include "h261.h"
48 #include "h263.h"
49 #include "h263data.h"
50 #include "mjpegenc_common.h"
51 #include "mathops.h"
52 #include "mpegutils.h"
53 #include "mjpegenc.h"
54 #include "msmpeg4.h"
55 #include "pixblockdsp.h"
56 #include "qpeldsp.h"
57 #include "faandct.h"
58 #include "thread.h"
59 #include "aandcttab.h"
60 #include "flv.h"
61 #include "mpeg4video.h"
62 #include "internal.h"
63 #include "bytestream.h"
64 #include "wmv2.h"
65 #include "rv10.h"
66 #include "packet_internal.h"
67 #include "libxvid.h"
68 #include <limits.h>
69 #include "sp5x.h"
70 
71 #define QUANT_BIAS_SHIFT 8
72 
73 #define QMAT_SHIFT_MMX 16
74 #define QMAT_SHIFT 21
75 
76 static int encode_picture(MpegEncContext *s, int picture_number);
77 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
78 static int sse_mb(MpegEncContext *s);
79 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
80 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
81 
82 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
83 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
84 
85 const AVOption ff_mpv_generic_options[] = {
86  FF_MPV_COMMON_OPTS
87  { NULL },
88 };
89 
90 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
91  uint16_t (*qmat16)[2][64],
92  const uint16_t *quant_matrix,
93  int bias, int qmin, int qmax, int intra)
94 {
95  FDCTDSPContext *fdsp = &s->fdsp;
96  int qscale;
97  int shift = 0;
98 
99  for (qscale = qmin; qscale <= qmax; qscale++) {
100  int i;
101  int qscale2;
102 
103  if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
104  else qscale2 = qscale << 1;
105 
106  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
107 #if CONFIG_FAANDCT
108  fdsp->fdct == ff_faandct ||
109 #endif /* CONFIG_FAANDCT */
110  fdsp->fdct == ff_jpeg_fdct_islow_10) {
111  for (i = 0; i < 64; i++) {
112  const int j = s->idsp.idct_permutation[i];
113  int64_t den = (int64_t) qscale2 * quant_matrix[j];
114  /* 16 <= qscale * quant_matrix[i] <= 7905
115  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
116  * 19952 <= x <= 249205026
117  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
118  * 3444240 >= (1 << 36) / (x) >= 275 */
119 
120  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
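 /* Illustrative numbers for the entry above (a sketch, assuming the usual
  * multiply-and-shift use in the quantizer and ignoring the rounding bias):
  * with qscale = 8 (qscale2 = 16) and quant_matrix[j] = 16, den = 256 and
  * qmat[qscale][i] = (2 << 21) / 256 = 16384, so
  * (coef * 16384) >> QMAT_SHIFT = coef / 128 = coef / (qscale * quant_matrix[j]),
  * i.e. the table stores a fixed-point reciprocal and the per-coefficient
  * division becomes a multiply. */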
121  }
122  } else if (fdsp->fdct == ff_fdct_ifast) {
123  for (i = 0; i < 64; i++) {
124  const int j = s->idsp.idct_permutation[i];
125  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
126  /* 16 <= qscale * quant_matrix[i] <= 7905
127  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
128  * 19952 <= x <= 249205026
129  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
130  * 3444240 >= (1 << 36) / (x) >= 275 */
131 
132  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
133  }
134  } else {
135  for (i = 0; i < 64; i++) {
136  const int j = s->idsp.idct_permutation[i];
137  int64_t den = (int64_t) qscale2 * quant_matrix[j];
138  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
139  * Assume x = qscale * quant_matrix[i]
140  * So 16 <= x <= 7905
141  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
142  * so 32768 >= (1 << 19) / (x) >= 67 */
143  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
144  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
145  // (qscale * quant_matrix[i]);
146  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
147 
148  if (qmat16[qscale][0][i] == 0 ||
149  qmat16[qscale][0][i] == 128 * 256)
150  qmat16[qscale][0][i] = 128 * 256 - 1;
151  qmat16[qscale][1][i] =
152  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
153  qmat16[qscale][0][i]);
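 /* Rough numbers for the 16-bit tables, under the same qscale = 8,
  * quant_matrix[j] = 16 assumption: den = 256, so qmat16[qscale][0][i] =
  * (2 << 16) / 256 = 512, and with the intra bias 3 << (QUANT_BIAS_SHIFT - 3)
  * = 96 the companion entry is ROUNDED_DIV(96 * 256, 512) = 48; adding 48 to
  * a coefficient before that multiply contributes 48 * 512 / 65536 = 3/8 of
  * a quantization step, mirroring the 32-bit bias in 16-bit precision. */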
154  }
155  }
156 
157  for (i = intra; i < 64; i++) {
158  int64_t max = 8191;
159  if (fdsp->fdct == ff_fdct_ifast) {
160  max = (8191LL * ff_aanscales[i]) >> 14;
161  }
162  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
163  shift++;
164  }
165  }
166  }
167  if (shift) {
168  av_log(s->avctx, AV_LOG_INFO,
169  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
170  QMAT_SHIFT - shift);
171  }
172 }
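/* Typical use later in this file (encoder init): one call per matrix, e.g.
 *
 *     ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
 *                       s->intra_matrix, s->intra_quant_bias, avctx->qmin,
 *                       31, 1);
 *
 * so the per-macroblock quantizers only index precomputed tables for
 * qscale values qmin..31. */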
173 
174 static inline void update_qscale(MpegEncContext *s)
175 {
176  if (s->q_scale_type == 1 && 0) {
177  int i;
178  int bestdiff=INT_MAX;
179  int best = 1;
180 
181  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
182  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
183  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
184  (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
185  continue;
186  if (diff < bestdiff) {
187  bestdiff = diff;
188  best = i;
189  }
190  }
191  s->qscale = best;
192  } else {
193  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
194  (FF_LAMBDA_SHIFT + 7);
195  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
196  }
197 
198  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
199  FF_LAMBDA_SHIFT;
200 }
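/* Sanity check of the linear branch (FF_LAMBDA_SHIFT = 7, FF_LAMBDA_SCALE =
 * 128, FF_QP2LAMBDA = 118): lambda = 10 * 118 = 1180 gives
 * qscale = (1180 * 139 + 128 * 64) >> 14 = 172212 >> 14 = 10, i.e. the
 * 139 / 2^14 factor approximately inverts the qp -> lambda mapping before
 * the result is clipped to the allowed range. */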
201 
202 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
203 {
204  int i;
205 
206  if (matrix) {
207  put_bits(pb, 1, 1);
208  for (i = 0; i < 64; i++) {
209  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
210  }
211  } else
212  put_bits(pb, 1, 0);
213 }
214 
215 /**
216  * init s->current_picture.qscale_table from s->lambda_table
217  */
218 void ff_init_qscale_tab(MpegEncContext *s)
219 {
220  int8_t * const qscale_table = s->current_picture.qscale_table;
221  int i;
222 
223  for (i = 0; i < s->mb_num; i++) {
224  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
225  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
226  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
227  s->avctx->qmax);
228  }
229 }
230 
231 static void update_duplicate_context_after_me(MpegEncContext *dst,
232  MpegEncContext *src)
233 {
234 #define COPY(a) dst->a= src->a
235  COPY(pict_type);
236  COPY(current_picture);
237  COPY(f_code);
238  COPY(b_code);
239  COPY(qscale);
240  COPY(lambda);
241  COPY(lambda2);
242  COPY(picture_in_gop_number);
243  COPY(gop_picture_number);
244  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
245  COPY(progressive_frame); // FIXME don't set in encode_header
246  COPY(partitioned_frame); // FIXME don't set in encode_header
247 #undef COPY
248 }
249 
250 /**
251  * Set the given MpegEncContext to defaults for encoding.
252  * the changed fields will not depend upon the prior state of the MpegEncContext.
253  */
254 static void mpv_encode_defaults(MpegEncContext *s)
255 {
256  int i;
257  ff_mpv_common_defaults(s);
258 
259  for (i = -16; i < 16; i++) {
260  default_fcode_tab[i + MAX_MV] = 1;
261  }
262  s->me.mv_penalty = default_mv_penalty;
263  s->fcode_tab = default_fcode_tab;
264 
265  s->input_picture_number = 0;
266  s->picture_in_gop_number = 0;
267 }
268 
269 av_cold int ff_dct_encode_init(MpegEncContext *s)
270 {
271  if (ARCH_X86)
272  ff_dct_encode_init_x86(s);
273 
274  if (CONFIG_H263_ENCODER)
275  ff_h263dsp_init(&s->h263dsp);
276  if (!s->dct_quantize)
277  s->dct_quantize = ff_dct_quantize_c;
278  if (!s->denoise_dct)
279  s->denoise_dct = denoise_dct_c;
280  s->fast_dct_quantize = s->dct_quantize;
281  if (s->avctx->trellis)
282  s->dct_quantize = dct_quantize_trellis_c;
283 
284  return 0;
285 }
286 
287 /* init video encoder */
288 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
289 {
290  MpegEncContext *s = avctx->priv_data;
291  AVCPBProperties *cpb_props;
292  int i, ret, format_supported;
293 
294  mpv_encode_defaults(s);
295 
296  switch (avctx->codec_id) {
297  case AV_CODEC_ID_MPEG2VIDEO:
298  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
299  avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
300  av_log(avctx, AV_LOG_ERROR,
301  "only YUV420 and YUV422 are supported\n");
302  return AVERROR(EINVAL);
303  }
304  break;
305  case AV_CODEC_ID_MJPEG:
306  case AV_CODEC_ID_AMV:
307  format_supported = 0;
308  /* JPEG color space */
309  if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
310  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
311  avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
312  (avctx->color_range == AVCOL_RANGE_JPEG &&
313  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
314  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
315  avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
316  format_supported = 1;
317  /* MPEG color space */
318  else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
319  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
320  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
321  avctx->pix_fmt == AV_PIX_FMT_YUV444P))
322  format_supported = 1;
323 
324  if (!format_supported) {
325  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
326  return AVERROR(EINVAL);
327  }
328  break;
329  default:
330  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
331  av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
332  return AVERROR(EINVAL);
333  }
334  }
335 
336  switch (avctx->pix_fmt) {
337  case AV_PIX_FMT_YUVJ444P:
338  case AV_PIX_FMT_YUV444P:
339  s->chroma_format = CHROMA_444;
340  break;
341  case AV_PIX_FMT_YUVJ422P:
342  case AV_PIX_FMT_YUV422P:
343  s->chroma_format = CHROMA_422;
344  break;
345  case AV_PIX_FMT_YUVJ420P:
346  case AV_PIX_FMT_YUV420P:
347  default:
348  s->chroma_format = CHROMA_420;
349  break;
350  }
351 
353 
354 #if FF_API_PRIVATE_OPT
356  if (avctx->rtp_payload_size)
357  s->rtp_payload_size = avctx->rtp_payload_size;
359  s->me_penalty_compensation = avctx->me_penalty_compensation;
360  if (avctx->pre_me)
361  s->me_pre = avctx->pre_me;
363 #endif
364 
365  s->bit_rate = avctx->bit_rate;
366  s->width = avctx->width;
367  s->height = avctx->height;
368  if (avctx->gop_size > 600 &&
371  "keyframe interval too large!, reducing it from %d to %d\n",
372  avctx->gop_size, 600);
373  avctx->gop_size = 600;
374  }
375  s->gop_size = avctx->gop_size;
376  s->avctx = avctx;
378  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
379  "is %d.\n", MAX_B_FRAMES);
381  }
382  s->max_b_frames = avctx->max_b_frames;
383  s->codec_id = avctx->codec->id;
384  s->strict_std_compliance = avctx->strict_std_compliance;
385  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
386  s->rtp_mode = !!s->rtp_payload_size;
387  s->intra_dc_precision = avctx->intra_dc_precision;
388 
389  // workaround some differences between how applications specify dc precision
390  if (s->intra_dc_precision < 0) {
391  s->intra_dc_precision += 8;
392  } else if (s->intra_dc_precision >= 8)
393  s->intra_dc_precision -= 8;
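 /* Example of the remapping above: an application passing 8 (counting
  * absolute DC bits) ends up at 0 and 11 ends up at 3, the same 0..3 range
  * used by applications that already follow the MPEG-2 convention. */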
394 
395  if (s->intra_dc_precision < 0) {
397  "intra dc precision must be positive, note some applications use"
398  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
399  return AVERROR(EINVAL);
400  }
401 
403  s->huffman = 0;
404 
405  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
406  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
407  return AVERROR(EINVAL);
408  }
409  s->user_specified_pts = AV_NOPTS_VALUE;
410 
411  if (s->gop_size <= 1) {
412  s->intra_only = 1;
413  s->gop_size = 12;
414  } else {
415  s->intra_only = 0;
416  }
417 
418  /* Fixed QSCALE */
419  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
420 
421  s->adaptive_quant = (s->avctx->lumi_masking ||
422  s->avctx->dark_masking ||
423  s->avctx->temporal_cplx_masking ||
424  s->avctx->spatial_cplx_masking ||
425  s->avctx->p_masking ||
426  s->border_masking ||
427  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
428  !s->fixed_qscale;
429 
430  s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
431 
432  if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
433  switch(avctx->codec_id) {
434  case AV_CODEC_ID_MPEG1VIDEO:
435  case AV_CODEC_ID_MPEG2VIDEO:
436  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
437  break;
438  case AV_CODEC_ID_MPEG4:
439  case AV_CODEC_ID_MSMPEG4V3:
440  case AV_CODEC_ID_WMV2:
441  case AV_CODEC_ID_FLV1:
442  if (avctx->rc_max_rate >= 15000000) {
443  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
444  } else if(avctx->rc_max_rate >= 2000000) {
445  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
446  } else if(avctx->rc_max_rate >= 384000) {
447  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
448  } else
449  avctx->rc_buffer_size = 40;
450  avctx->rc_buffer_size *= 16384;
451  break;
452  }
453  if (avctx->rc_buffer_size) {
454  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
455  }
456  }
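 /* Worked example for the interpolating branch above: rc_max_rate = 8000000
  * lies between 2M and 15M, so rc_buffer_size = 80 + 6000000 * 240 / 13000000
  * = 80 + 110 = 190, then * 16384 = 3112960 bits, reported by the log as
  * 3112960 / 8192 = 380 kbyte. */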
457 
458  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
459  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
460  return AVERROR(EINVAL);
461  }
462 
465  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
466  }
467 
469  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
470  return AVERROR(EINVAL);
471  }
472 
474  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
475  return AVERROR(EINVAL);
476  }
477 
478  if (avctx->rc_max_rate &&
482  "impossible bitrate constraints, this will fail\n");
483  }
484 
485  if (avctx->rc_buffer_size &&
486  avctx->bit_rate * (int64_t)avctx->time_base.num >
487  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
488  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
489  return AVERROR(EINVAL);
490  }
491 
492  if (!s->fixed_qscale &&
495  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
497  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
498  if (nbt <= INT_MAX) {
499  avctx->bit_rate_tolerance = nbt;
500  } else
501  avctx->bit_rate_tolerance = INT_MAX;
502  }
503 
504  if (s->avctx->rc_max_rate &&
505  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
506  (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
507  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
508  90000LL * (avctx->rc_buffer_size - 1) >
509  s->avctx->rc_max_rate * 0xFFFFLL) {
511  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
512  "specified vbv buffer is too large for the given bitrate!\n");
513  }
514 
515  if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
516  s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
517  s->codec_id != AV_CODEC_ID_FLV1) {
518  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
519  return AVERROR(EINVAL);
520  }
521 
522  if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
524  "OBMC is only supported with simple mb decision\n");
525  return AVERROR(EINVAL);
526  }
527 
528  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
529  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
530  return AVERROR(EINVAL);
531  }
532 
533  if (s->max_b_frames &&
534  s->codec_id != AV_CODEC_ID_MPEG4 &&
535  s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
536  s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
537  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
538  return AVERROR(EINVAL);
539  }
540  if (s->max_b_frames < 0) {
542  "max b frames must be 0 or positive for mpegvideo based encoders\n");
543  return AVERROR(EINVAL);
544  }
545 
546  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
547  s->codec_id == AV_CODEC_ID_H263 ||
548  s->codec_id == AV_CODEC_ID_H263P) &&
549  (avctx->sample_aspect_ratio.num > 255 ||
550  avctx->sample_aspect_ratio.den > 255)) {
552  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
556  }
557 
558  if ((s->codec_id == AV_CODEC_ID_H263 ||
559  s->codec_id == AV_CODEC_ID_H263P) &&
560  (avctx->width > 2048 ||
561  avctx->height > 1152 )) {
562  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
563  return AVERROR(EINVAL);
564  }
565  if ((s->codec_id == AV_CODEC_ID_H263 ||
566  s->codec_id == AV_CODEC_ID_H263P) &&
567  ((avctx->width &3) ||
568  (avctx->height&3) )) {
569  av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
570  return AVERROR(EINVAL);
571  }
572 
573  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
574  (avctx->width > 4095 ||
575  avctx->height > 4095 )) {
576  av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
577  return AVERROR(EINVAL);
578  }
579 
580  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
581  (avctx->width > 16383 ||
582  avctx->height > 16383 )) {
583  av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
584  return AVERROR(EINVAL);
585  }
586 
587  if (s->codec_id == AV_CODEC_ID_RV10 &&
588  (avctx->width &15 ||
589  avctx->height&15 )) {
590  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
591  return AVERROR(EINVAL);
592  }
593 
594  if (s->codec_id == AV_CODEC_ID_RV20 &&
595  (avctx->width &3 ||
596  avctx->height&3 )) {
597  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
598  return AVERROR(EINVAL);
599  }
600 
601  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
602  s->codec_id == AV_CODEC_ID_WMV2) &&
603  avctx->width & 1) {
604  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
605  return AVERROR(EINVAL);
606  }
607 
608  if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
609  s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
610  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
611  return AVERROR(EINVAL);
612  }
613 
614 #if FF_API_PRIVATE_OPT
616  if (avctx->mpeg_quant)
617  s->mpeg_quant = avctx->mpeg_quant;
619 #endif
620 
621  // FIXME mpeg2 uses that too
622  if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
623  && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
625  "mpeg2 style quantization not supported by codec\n");
626  return AVERROR(EINVAL);
627  }
628 
629  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
630  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
631  return AVERROR(EINVAL);
632  }
633 
634  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
635  s->avctx->mb_decision != FF_MB_DECISION_RD) {
636  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
637  return AVERROR(EINVAL);
638  }
639 
640  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
641  (s->codec_id == AV_CODEC_ID_AMV ||
642  s->codec_id == AV_CODEC_ID_MJPEG)) {
643  // Used to produce garbage with MJPEG.
645  "QP RD is no longer compatible with MJPEG or AMV\n");
646  return AVERROR(EINVAL);
647  }
648 
649 #if FF_API_PRIVATE_OPT
652  s->scenechange_threshold = avctx->scenechange_threshold;
654 #endif
655 
656  if (s->scenechange_threshold < 1000000000 &&
657  (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
659  "closed gop with scene change detection are not supported yet, "
660  "set threshold to 1000000000\n");
661  return AVERROR_PATCHWELCOME;
662  }
663 
664  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
665  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
666  s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
668  "low delay forcing is only available for mpeg2, "
669  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
670  return AVERROR(EINVAL);
671  }
672  if (s->max_b_frames != 0) {
674  "B-frames cannot be used with low delay\n");
675  return AVERROR(EINVAL);
676  }
677  }
678 
679  if (s->q_scale_type == 1) {
680  if (avctx->qmax > 28) {
682  "non linear quant only supports qmax <= 28 currently\n");
683  return AVERROR_PATCHWELCOME;
684  }
685  }
686 
687  if (avctx->slices > 1 &&
689  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
690  return AVERROR(EINVAL);
691  }
692 
693  if (s->avctx->thread_count > 1 &&
694  s->codec_id != AV_CODEC_ID_MPEG4 &&
695  s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
696  s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
697  s->codec_id != AV_CODEC_ID_MJPEG &&
698  (s->codec_id != AV_CODEC_ID_H263P)) {
700  "multi threaded encoding not supported by codec\n");
701  return AVERROR_PATCHWELCOME;
702  }
703 
704  if (s->avctx->thread_count < 1) {
706  "automatic thread number detection not supported by codec, "
707  "patch welcome\n");
708  return AVERROR_PATCHWELCOME;
709  }
710 
711  if (!avctx->time_base.den || !avctx->time_base.num) {
712  av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
713  return AVERROR(EINVAL);
714  }
715 
716 #if FF_API_PRIVATE_OPT
718  if (avctx->b_frame_strategy)
719  s->b_frame_strategy = avctx->b_frame_strategy;
720  if (avctx->b_sensitivity != 40)
721  s->b_sensitivity = avctx->b_sensitivity;
723 #endif
724 
725  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
727  "notice: b_frame_strategy only affects the first pass\n");
728  s->b_frame_strategy = 0;
729  }
730 
732  if (i > 1) {
733  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
734  avctx->time_base.den /= i;
735  avctx->time_base.num /= i;
736  //return -1;
737  }
738 
739  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
740  // (a + x * 3 / 8) / x
741  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
742  s->inter_quant_bias = 0;
743  } else {
744  s->intra_quant_bias = 0;
745  // (a - x / 4) / x
746  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
747  }
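 /* Illustrative effect of the bias (QUANT_BIAS_SHIFT = 8): the intra value
  * 3 << 5 = 96 is 96/256 = 3/8 of a quantization step, so with a step of
  * x = 16 a coefficient of 90 quantizes as (90 + 6) / 16 = 6 instead of
  * 90 / 16 = 5; the negative inter bias (-64/256 = -1/4 step) rounds the
  * other way and favours zero coefficients in inter blocks. */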
748 
749  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
750  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
751  return AVERROR(EINVAL);
752  }
753 
754  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
755 
756  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
757  s->avctx->time_base.den > (1 << 16) - 1) {
759  "timebase %d/%d not supported by MPEG 4 standard, "
760  "the maximum admitted value for the timebase denominator "
761  "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
762  (1 << 16) - 1);
763  return AVERROR(EINVAL);
764  }
765  s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
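 /* Example: time_base.den = 30000 gives av_log2(29999) = 14, so
  * time_increment_bits = 15, enough bits to code any time increment in
  * 0..29999 (e.g. MPEG-4's vop_time_increment field). */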
766 
767  switch (avctx->codec->id) {
768  case AV_CODEC_ID_MPEG1VIDEO:
769  s->out_format = FMT_MPEG1;
770  s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
771  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
772  break;
773  case AV_CODEC_ID_MPEG2VIDEO:
774  s->out_format = FMT_MPEG1;
775  s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
776  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
777  s->rtp_mode = 1;
778  break;
779  case AV_CODEC_ID_MJPEG:
780  case AV_CODEC_ID_AMV:
781  s->out_format = FMT_MJPEG;
782  s->intra_only = 1; /* force intra only for jpeg */
783  if (!CONFIG_MJPEG_ENCODER)
784  return AVERROR_ENCODER_NOT_FOUND;
785  if ((ret = ff_mjpeg_encode_init(s)) < 0)
786  return ret;
787  avctx->delay = 0;
788  s->low_delay = 1;
789  break;
790  case AV_CODEC_ID_H261:
791  if (!CONFIG_H261_ENCODER)
792  return AVERROR_ENCODER_NOT_FOUND;
793  if (ff_h261_get_picture_format(s->width, s->height) < 0) {
795  "The specified picture size of %dx%d is not valid for the "
796  "H.261 codec.\nValid sizes are 176x144, 352x288\n",
797  s->width, s->height);
798  return AVERROR(EINVAL);
799  }
800  s->out_format = FMT_H261;
801  avctx->delay = 0;
802  s->low_delay = 1;
803  s->rtp_mode = 0; /* Sliced encoding not supported */
804  break;
805  case AV_CODEC_ID_H263:
806  if (!CONFIG_H263_ENCODER)
807  return AVERROR_ENCODER_NOT_FOUND;
808  if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
809  s->width, s->height) == 8) {
811  "The specified picture size of %dx%d is not valid for "
812  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
813  "352x288, 704x576, and 1408x1152. "
814  "Try H.263+.\n", s->width, s->height);
815  return AVERROR(EINVAL);
816  }
817  s->out_format = FMT_H263;
818  avctx->delay = 0;
819  s->low_delay = 1;
820  break;
821  case AV_CODEC_ID_H263P:
822  s->out_format = FMT_H263;
823  s->h263_plus = 1;
824  /* Fx */
825  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
826  s->modified_quant = s->h263_aic;
827  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
828  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
829 
830  /* /Fx */
831  /* These are just to be sure */
832  avctx->delay = 0;
833  s->low_delay = 1;
834  break;
835  case AV_CODEC_ID_FLV1:
836  s->out_format = FMT_H263;
837  s->h263_flv = 2; /* format = 1; 11-bit codes */
838  s->unrestricted_mv = 1;
839  s->rtp_mode = 0; /* don't allow GOB */
840  avctx->delay = 0;
841  s->low_delay = 1;
842  break;
843  case AV_CODEC_ID_RV10:
844  s->out_format = FMT_H263;
845  avctx->delay = 0;
846  s->low_delay = 1;
847  break;
848  case AV_CODEC_ID_RV20:
849  s->out_format = FMT_H263;
850  avctx->delay = 0;
851  s->low_delay = 1;
852  s->modified_quant = 1;
853  s->h263_aic = 1;
854  s->h263_plus = 1;
855  s->loop_filter = 1;
856  s->unrestricted_mv = 0;
857  break;
858  case AV_CODEC_ID_MPEG4:
859  s->out_format = FMT_H263;
860  s->h263_pred = 1;
861  s->unrestricted_mv = 1;
862  s->low_delay = s->max_b_frames ? 0 : 1;
863  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
864  break;
865  case AV_CODEC_ID_MSMPEG4V2:
866  s->out_format = FMT_H263;
867  s->h263_pred = 1;
868  s->unrestricted_mv = 1;
869  s->msmpeg4_version = 2;
870  avctx->delay = 0;
871  s->low_delay = 1;
872  break;
873  case AV_CODEC_ID_MSMPEG4V3:
874  s->out_format = FMT_H263;
875  s->h263_pred = 1;
876  s->unrestricted_mv = 1;
877  s->msmpeg4_version = 3;
878  s->flipflop_rounding = 1;
879  avctx->delay = 0;
880  s->low_delay = 1;
881  break;
882  case AV_CODEC_ID_WMV1:
883  s->out_format = FMT_H263;
884  s->h263_pred = 1;
885  s->unrestricted_mv = 1;
886  s->msmpeg4_version = 4;
887  s->flipflop_rounding = 1;
888  avctx->delay = 0;
889  s->low_delay = 1;
890  break;
891  case AV_CODEC_ID_WMV2:
892  s->out_format = FMT_H263;
893  s->h263_pred = 1;
894  s->unrestricted_mv = 1;
895  s->msmpeg4_version = 5;
896  s->flipflop_rounding = 1;
897  avctx->delay = 0;
898  s->low_delay = 1;
899  break;
900  default:
901  return AVERROR(EINVAL);
902  }
903 
904 #if FF_API_PRIVATE_OPT
906  if (avctx->noise_reduction)
907  s->noise_reduction = avctx->noise_reduction;
909 #endif
910 
911  avctx->has_b_frames = !s->low_delay;
912 
913  s->encoding = 1;
914 
915  s->progressive_frame =
916  s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
918  s->alternate_scan);
919 
920  /* init */
922  if ((ret = ff_mpv_common_init(s)) < 0)
923  return ret;
924 
925  ff_fdctdsp_init(&s->fdsp, avctx);
926  ff_me_cmp_init(&s->mecc, avctx);
927  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
928  ff_pixblockdsp_init(&s->pdsp, avctx);
929  ff_qpeldsp_init(&s->qdsp);
930 
931  if (s->msmpeg4_version) {
932  FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
933  2 * 2 * (MAX_LEVEL + 1) *
934  (MAX_RUN + 1) * 2 * sizeof(int), fail);
935  }
936  FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
937 
938  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
939  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
940  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
941  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
942  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
943  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
944  FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
945  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
946  FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
947  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
948 
949 
950  if (s->noise_reduction) {
951  FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
952  2 * 64 * sizeof(uint16_t), fail);
953  }
954 
956 
957  if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
958  s->chroma_qscale_table = ff_h263_chroma_qscale_table;
959 
960  if (s->slice_context_count > 1) {
961  s->rtp_mode = 1;
962 
964  s->h263_slice_structured = 1;
965  }
966 
967  s->quant_precision = 5;
968 
969 #if FF_API_PRIVATE_OPT
972  s->frame_skip_threshold = avctx->frame_skip_threshold;
974  s->frame_skip_factor = avctx->frame_skip_factor;
975  if (avctx->frame_skip_exp)
976  s->frame_skip_exp = avctx->frame_skip_exp;
978  s->frame_skip_cmp = avctx->frame_skip_cmp;
980 #endif
981 
982  ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
983  ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
984 
985  if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
987  if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
989  if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
990  if ((ret = ff_msmpeg4_encode_init(s)) < 0)
991  return ret;
992  if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
993  && s->out_format == FMT_MPEG1)
995 
996  /* init q matrix */
997  for (i = 0; i < 64; i++) {
998  int j = s->idsp.idct_permutation[i];
999  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1000  s->mpeg_quant) {
1001  s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
1002  s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1003  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1004  s->intra_matrix[j] =
1005  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1006  } else {
1007  /* MPEG-1/2 */
1008  s->chroma_intra_matrix[j] =
1009  s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1010  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1011  }
1012  if (s->avctx->intra_matrix)
1013  s->intra_matrix[j] = s->avctx->intra_matrix[i];
1014  if (s->avctx->inter_matrix)
1015  s->inter_matrix[j] = s->avctx->inter_matrix[i];
1016  }
1017 
1018  /* precompute matrix */
1019  /* for mjpeg, we do include qscale in the matrix */
1020  if (s->out_format != FMT_MJPEG) {
1021  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1022  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1023  31, 1);
1024  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1025  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1026  31, 0);
1027  }
1028 
1029  if ((ret = ff_rate_control_init(s)) < 0)
1030  return ret;
1031 
1032 #if FF_API_PRIVATE_OPT
1034  if (avctx->brd_scale)
1035  s->brd_scale = avctx->brd_scale;
1036 
1037  if (avctx->prediction_method)
1038  s->pred = avctx->prediction_method + 1;
1040 #endif
1041 
1042  if (s->b_frame_strategy == 2) {
1043  for (i = 0; i < s->max_b_frames + 2; i++) {
1044  s->tmp_frames[i] = av_frame_alloc();
1045  if (!s->tmp_frames[i])
1046  return AVERROR(ENOMEM);
1047 
1048  s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1049  s->tmp_frames[i]->width = s->width >> s->brd_scale;
1050  s->tmp_frames[i]->height = s->height >> s->brd_scale;
1051 
1052  ret = av_frame_get_buffer(s->tmp_frames[i], 0);
1053  if (ret < 0)
1054  return ret;
1055  }
1056  }
1057 
1058  cpb_props = ff_add_cpb_side_data(avctx);
1059  if (!cpb_props)
1060  return AVERROR(ENOMEM);
1061  cpb_props->max_bitrate = avctx->rc_max_rate;
1062  cpb_props->min_bitrate = avctx->rc_min_rate;
1063  cpb_props->avg_bitrate = avctx->bit_rate;
1064  cpb_props->buffer_size = avctx->rc_buffer_size;
1065 
1066  return 0;
1067 fail:
1069  return AVERROR_UNKNOWN;
1070 }
1071 
1072 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
1073 {
1074  MpegEncContext *s = avctx->priv_data;
1075  int i;
1076 
1078 
1080  if (CONFIG_MJPEG_ENCODER &&
1081  s->out_format == FMT_MJPEG)
1083 
1085 
1086  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1087  av_frame_free(&s->tmp_frames[i]);
1088 
1089  ff_free_picture_tables(&s->new_picture);
1090  ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1091 
1092  av_freep(&s->avctx->stats_out);
1093  av_freep(&s->ac_stats);
1094 
1095  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1096  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1097  s->q_chroma_intra_matrix= NULL;
1098  s->q_chroma_intra_matrix16= NULL;
1099  av_freep(&s->q_intra_matrix);
1100  av_freep(&s->q_inter_matrix);
1101  av_freep(&s->q_intra_matrix16);
1102  av_freep(&s->q_inter_matrix16);
1103  av_freep(&s->input_picture);
1104  av_freep(&s->reordered_input_picture);
1105  av_freep(&s->dct_offset);
1106 
1107  return 0;
1108 }
1109 
1110 static int get_sae(uint8_t *src, int ref, int stride)
1111 {
1112  int x,y;
1113  int acc = 0;
1114 
1115  for (y = 0; y < 16; y++) {
1116  for (x = 0; x < 16; x++) {
1117  acc += FFABS(src[x + y * stride] - ref);
1118  }
1119  }
1120 
1121  return acc;
1122 }
1123 
1124 static int get_intra_count(MpegEncContext *s, uint8_t *src,
1125  uint8_t *ref, int stride)
1126 {
1127  int x, y, w, h;
1128  int acc = 0;
1129 
1130  w = s->width & ~15;
1131  h = s->height & ~15;
1132 
1133  for (y = 0; y < h; y += 16) {
1134  for (x = 0; x < w; x += 16) {
1135  int offset = x + y * stride;
1136  int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1137  stride, 16);
1138  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1139  int sae = get_sae(src + offset, mean, stride);
1140 
1141  acc += sae + 500 < sad;
1142  }
1143  }
1144  return acc;
1145 }
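/* In words: for each 16x16 block, sae measures how far the pixels are from
 * their own mean (a crude intra cost) while sad measures the difference
 * against the reference frame (a crude inter cost); a block counts as
 * "intra-looking" when sae + 500 < sad. The b_frame_strategy == 1 heuristic
 * further below uses this count to decide how many B-frames to schedule. */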
1146 
1147 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1148 {
1149  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1150  s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1151  s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1152  &s->linesize, &s->uvlinesize);
1153 }
1154 
1155 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1156 {
1157  Picture *pic = NULL;
1158  int64_t pts;
1159  int i, display_picture_number = 0, ret;
1160  int encoding_delay = s->max_b_frames ? s->max_b_frames
1161  : (s->low_delay ? 0 : 1);
1162  int flush_offset = 1;
1163  int direct = 1;
1164 
1165  if (pic_arg) {
1166  pts = pic_arg->pts;
1167  display_picture_number = s->input_picture_number++;
1168 
1169  if (pts != AV_NOPTS_VALUE) {
1170  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1171  int64_t last = s->user_specified_pts;
1172 
1173  if (pts <= last) {
1174  av_log(s->avctx, AV_LOG_ERROR,
1175  "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1176  pts, last);
1177  return AVERROR(EINVAL);
1178  }
1179 
1180  if (!s->low_delay && display_picture_number == 1)
1181  s->dts_delta = pts - last;
1182  }
1183  s->user_specified_pts = pts;
1184  } else {
1185  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1186  s->user_specified_pts =
1187  pts = s->user_specified_pts + 1;
1188  av_log(s->avctx, AV_LOG_INFO,
1189  "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1190  pts);
1191  } else {
1192  pts = display_picture_number;
1193  }
1194  }
1195 
1196  if (!pic_arg->buf[0] ||
1197  pic_arg->linesize[0] != s->linesize ||
1198  pic_arg->linesize[1] != s->uvlinesize ||
1199  pic_arg->linesize[2] != s->uvlinesize)
1200  direct = 0;
1201  if ((s->width & 15) || (s->height & 15))
1202  direct = 0;
1203  if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1204  direct = 0;
1205  if (s->linesize & (STRIDE_ALIGN-1))
1206  direct = 0;
1207 
1208  ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1209  pic_arg->linesize[1], s->linesize, s->uvlinesize);
1210 
1211  i = ff_find_unused_picture(s->avctx, s->picture, direct);
1212  if (i < 0)
1213  return i;
1214 
1215  pic = &s->picture[i];
1216  pic->reference = 3;
1217 
1218  if (direct) {
1219  if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1220  return ret;
1221  }
1222  ret = alloc_picture(s, pic, direct);
1223  if (ret < 0)
1224  return ret;
1225 
1226  if (!direct) {
1227  if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1228  pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1229  pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1230  // empty
1231  } else {
1232  int h_chroma_shift, v_chroma_shift;
1233  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1234  &h_chroma_shift,
1235  &v_chroma_shift);
1236 
1237  for (i = 0; i < 3; i++) {
1238  int src_stride = pic_arg->linesize[i];
1239  int dst_stride = i ? s->uvlinesize : s->linesize;
1240  int h_shift = i ? h_chroma_shift : 0;
1241  int v_shift = i ? v_chroma_shift : 0;
1242  int w = s->width >> h_shift;
1243  int h = s->height >> v_shift;
1244  uint8_t *src = pic_arg->data[i];
1245  uint8_t *dst = pic->f->data[i];
1246  int vpad = 16;
1247 
1248  if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1249  && !s->progressive_sequence
1250  && FFALIGN(s->height, 32) - s->height > 16)
1251  vpad = 32;
1252 
1253  if (!s->avctx->rc_buffer_size)
1254  dst += INPLACE_OFFSET;
1255 
1256  if (src_stride == dst_stride)
1257  memcpy(dst, src, src_stride * h);
1258  else {
1259  int h2 = h;
1260  uint8_t *dst2 = dst;
1261  while (h2--) {
1262  memcpy(dst2, src, w);
1263  dst2 += dst_stride;
1264  src += src_stride;
1265  }
1266  }
1267  if ((s->width & 15) || (s->height & (vpad-1))) {
1268  s->mpvencdsp.draw_edges(dst, dst_stride,
1269  w, h,
1270  16 >> h_shift,
1271  vpad >> v_shift,
1272  EDGE_BOTTOM);
1273  }
1274  }
1275  emms_c();
1276  }
1277  }
1278  ret = av_frame_copy_props(pic->f, pic_arg);
1279  if (ret < 0)
1280  return ret;
1281 
1282  pic->f->display_picture_number = display_picture_number;
1283  pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1284  } else {
1285  /* Flushing: When we have not received enough input frames,
1286  * ensure s->input_picture[0] contains the first picture */
1287  for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1288  if (s->input_picture[flush_offset])
1289  break;
1290 
1291  if (flush_offset <= 1)
1292  flush_offset = 1;
1293  else
1294  encoding_delay = encoding_delay - flush_offset + 1;
1295  }
1296 
1297  /* shift buffer entries */
1298  for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1299  s->input_picture[i - flush_offset] = s->input_picture[i];
1300 
1301  s->input_picture[encoding_delay] = (Picture*) pic;
1302 
1303  return 0;
1304 }
1305 
1306 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1307 {
1308  int x, y, plane;
1309  int score = 0;
1310  int64_t score64 = 0;
1311 
1312  for (plane = 0; plane < 3; plane++) {
1313  const int stride = p->f->linesize[plane];
1314  const int bw = plane ? 1 : 2;
1315  for (y = 0; y < s->mb_height * bw; y++) {
1316  for (x = 0; x < s->mb_width * bw; x++) {
1317  int off = p->shared ? 0 : 16;
1318  uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1319  uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1320  int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1321 
1322  switch (FFABS(s->frame_skip_exp)) {
1323  case 0: score = FFMAX(score, v); break;
1324  case 1: score += FFABS(v); break;
1325  case 2: score64 += v * (int64_t)v; break;
1326  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1327  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1328  }
1329  }
1330  }
1331  }
1332  emms_c();
1333 
1334  if (score)
1335  score64 = score;
1336  if (s->frame_skip_exp < 0)
1337  score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1338  -1.0/s->frame_skip_exp);
1339 
1340  if (score64 < s->frame_skip_threshold)
1341  return 1;
1342  if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1343  return 1;
1344  return 0;
1345 }
1346 
1347 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1348 {
1349  AVPacket pkt = { 0 };
1350  int ret;
1351  int size = 0;
1352 
1353  av_init_packet(&pkt);
1354 
1356  if (ret < 0)
1357  return ret;
1358 
1359  do {
1361  if (ret >= 0) {
1362  size += pkt.size;
1363  av_packet_unref(&pkt);
1364  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1365  return ret;
1366  } while (ret >= 0);
1367 
1368  return size;
1369 }
1370 
1371 static int estimate_best_b_count(MpegEncContext *s)
1372 {
1373  const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1374  const int scale = s->brd_scale;
1375  int width = s->width >> scale;
1376  int height = s->height >> scale;
1377  int i, j, out_size, p_lambda, b_lambda, lambda2;
1378  int64_t best_rd = INT64_MAX;
1379  int best_b_count = -1;
1380  int ret = 0;
1381 
1382  av_assert0(scale >= 0 && scale <= 3);
1383 
1384  //emms_c();
1385  //s->next_picture_ptr->quality;
1386  p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1387  //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1388  b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1389  if (!b_lambda) // FIXME we should do this somewhere else
1390  b_lambda = p_lambda;
1391  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1393 
1394  for (i = 0; i < s->max_b_frames + 2; i++) {
1395  Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1396  s->next_picture_ptr;
1397  uint8_t *data[4];
1398 
1399  if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1400  pre_input = *pre_input_ptr;
1401  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1402 
1403  if (!pre_input.shared && i) {
1404  data[0] += INPLACE_OFFSET;
1405  data[1] += INPLACE_OFFSET;
1406  data[2] += INPLACE_OFFSET;
1407  }
1408 
1409  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1410  s->tmp_frames[i]->linesize[0],
1411  data[0],
1412  pre_input.f->linesize[0],
1413  width, height);
1414  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1415  s->tmp_frames[i]->linesize[1],
1416  data[1],
1417  pre_input.f->linesize[1],
1418  width >> 1, height >> 1);
1419  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1420  s->tmp_frames[i]->linesize[2],
1421  data[2],
1422  pre_input.f->linesize[2],
1423  width >> 1, height >> 1);
1424  }
1425  }
1426 
1427  for (j = 0; j < s->max_b_frames + 1; j++) {
1428  AVCodecContext *c;
1429  int64_t rd = 0;
1430 
1431  if (!s->input_picture[j])
1432  break;
1433 
1435  if (!c)
1436  return AVERROR(ENOMEM);
1437 
1438  c->width = width;
1439  c->height = height;
1441  c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1442  c->mb_decision = s->avctx->mb_decision;
1443  c->me_cmp = s->avctx->me_cmp;
1444  c->mb_cmp = s->avctx->mb_cmp;
1445  c->me_sub_cmp = s->avctx->me_sub_cmp;
1446  c->pix_fmt = AV_PIX_FMT_YUV420P;
1447  c->time_base = s->avctx->time_base;
1448  c->max_b_frames = s->max_b_frames;
1449 
1450  ret = avcodec_open2(c, codec, NULL);
1451  if (ret < 0)
1452  goto fail;
1453 
1454  s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1455  s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1456 
1457  out_size = encode_frame(c, s->tmp_frames[0]);
1458  if (out_size < 0) {
1459  ret = out_size;
1460  goto fail;
1461  }
1462 
1463  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1464 
1465  for (i = 0; i < s->max_b_frames + 1; i++) {
1466  int is_p = i % (j + 1) == j || i == s->max_b_frames;
1467 
1468  s->tmp_frames[i + 1]->pict_type = is_p ?
1469  AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1470  s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1471 
1472  out_size = encode_frame(c, s->tmp_frames[i + 1]);
1473  if (out_size < 0) {
1474  ret = out_size;
1475  goto fail;
1476  }
1477 
1478  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1479  }
1480 
1481  /* get the delayed frames */
1483  if (out_size < 0) {
1484  ret = out_size;
1485  goto fail;
1486  }
1487  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1488 
1489  rd += c->error[0] + c->error[1] + c->error[2];
1490 
1491  if (rd < best_rd) {
1492  best_rd = rd;
1493  best_b_count = j;
1494  }
1495 
1496 fail:
1498  if (ret < 0)
1499  return ret;
1500  }
1501 
1502  return best_b_count;
1503 }
1504 
1505 static int select_input_picture(MpegEncContext *s)
1506 {
1507  int i, ret;
1508 
1509  for (i = 1; i < MAX_PICTURE_COUNT; i++)
1510  s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1511  s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1512 
1513  /* set next picture type & ordering */
1514  if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1515  if (s->frame_skip_threshold || s->frame_skip_factor) {
1516  if (s->picture_in_gop_number < s->gop_size &&
1517  s->next_picture_ptr &&
1518  skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1519  // FIXME check that the gop check above is +-1 correct
1520  av_frame_unref(s->input_picture[0]->f);
1521 
1522  ff_vbv_update(s, 0);
1523 
1524  goto no_output_pic;
1525  }
1526  }
1527 
1528  if (/*s->picture_in_gop_number >= s->gop_size ||*/
1529  !s->next_picture_ptr || s->intra_only) {
1530  s->reordered_input_picture[0] = s->input_picture[0];
1531  s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1532  s->reordered_input_picture[0]->f->coded_picture_number =
1533  s->coded_picture_number++;
1534  } else {
1535  int b_frames = 0;
1536 
1537  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1538  for (i = 0; i < s->max_b_frames + 1; i++) {
1539  int pict_num = s->input_picture[0]->f->display_picture_number + i;
1540 
1541  if (pict_num >= s->rc_context.num_entries)
1542  break;
1543  if (!s->input_picture[i]) {
1544  s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1545  break;
1546  }
1547 
1548  s->input_picture[i]->f->pict_type =
1549  s->rc_context.entry[pict_num].new_pict_type;
1550  }
1551  }
1552 
1553  if (s->b_frame_strategy == 0) {
1554  b_frames = s->max_b_frames;
1555  while (b_frames && !s->input_picture[b_frames])
1556  b_frames--;
1557  } else if (s->b_frame_strategy == 1) {
1558  for (i = 1; i < s->max_b_frames + 1; i++) {
1559  if (s->input_picture[i] &&
1560  s->input_picture[i]->b_frame_score == 0) {
1561  s->input_picture[i]->b_frame_score =
1563  s->input_picture[i ]->f->data[0],
1564  s->input_picture[i - 1]->f->data[0],
1565  s->linesize) + 1;
1566  }
1567  }
1568  for (i = 0; i < s->max_b_frames + 1; i++) {
1569  if (!s->input_picture[i] ||
1570  s->input_picture[i]->b_frame_score - 1 >
1571  s->mb_num / s->b_sensitivity)
1572  break;
1573  }
1574 
1575  b_frames = FFMAX(0, i - 1);
1576 
1577  /* reset scores */
1578  for (i = 0; i < b_frames + 1; i++) {
1579  s->input_picture[i]->b_frame_score = 0;
1580  }
1581  } else if (s->b_frame_strategy == 2) {
1582  b_frames = estimate_best_b_count(s);
1583  if (b_frames < 0)
1584  return b_frames;
1585  }
1586 
1587  emms_c();
1588 
1589  for (i = b_frames - 1; i >= 0; i--) {
1590  int type = s->input_picture[i]->f->pict_type;
1591  if (type && type != AV_PICTURE_TYPE_B)
1592  b_frames = i;
1593  }
1594  if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1595  b_frames == s->max_b_frames) {
1596  av_log(s->avctx, AV_LOG_ERROR,
1597  "warning, too many B-frames in a row\n");
1598  }
1599 
1600  if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1601  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1602  s->gop_size > s->picture_in_gop_number) {
1603  b_frames = s->gop_size - s->picture_in_gop_number - 1;
1604  } else {
1605  if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1606  b_frames = 0;
1607  s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1608  }
1609  }
1610 
1611  if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1612  s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1613  b_frames--;
1614 
1615  s->reordered_input_picture[0] = s->input_picture[b_frames];
1616  if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1617  s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1618  s->reordered_input_picture[0]->f->coded_picture_number =
1619  s->coded_picture_number++;
1620  for (i = 0; i < b_frames; i++) {
1621  s->reordered_input_picture[i + 1] = s->input_picture[i];
1622  s->reordered_input_picture[i + 1]->f->pict_type =
1623  AV_PICTURE_TYPE_B;
1624  s->reordered_input_picture[i + 1]->f->coded_picture_number =
1625  s->coded_picture_number++;
1626  }
1627  }
1628  }
1629 no_output_pic:
1630  ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1631 
1632  if (s->reordered_input_picture[0]) {
1633  s->reordered_input_picture[0]->reference =
1634  s->reordered_input_picture[0]->f->pict_type !=
1635  AV_PICTURE_TYPE_B ? 3 : 0;
1636 
1637  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1638  return ret;
1639 
1640  if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1641  // input is a shared pix, so we can't modify it -> allocate a new
1642  // one & ensure that the shared one is reuseable
1643 
1644  Picture *pic;
1645  int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1646  if (i < 0)
1647  return i;
1648  pic = &s->picture[i];
1649 
1650  pic->reference = s->reordered_input_picture[0]->reference;
1651  if (alloc_picture(s, pic, 0) < 0) {
1652  return -1;
1653  }
1654 
1655  ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1656  if (ret < 0)
1657  return ret;
1658 
1659  /* mark us unused / free shared pic */
1660  av_frame_unref(s->reordered_input_picture[0]->f);
1661  s->reordered_input_picture[0]->shared = 0;
1662 
1663  s->current_picture_ptr = pic;
1664  } else {
1665  // input is not a shared pix -> reuse buffer for current_pix
1666  s->current_picture_ptr = s->reordered_input_picture[0];
1667  for (i = 0; i < 4; i++) {
1668  s->new_picture.f->data[i] += INPLACE_OFFSET;
1669  }
1670  }
1671  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1672  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1673  s->current_picture_ptr)) < 0)
1674  return ret;
1675 
1676  s->picture_number = s->new_picture.f->display_picture_number;
1677  }
1678  return 0;
1679 }
1680 
1681 static void frame_end(MpegEncContext *s)
1682 {
1683  if (s->unrestricted_mv &&
1684  s->current_picture.reference &&
1685  !s->intra_only) {
1686  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1687  int hshift = desc->log2_chroma_w;
1688  int vshift = desc->log2_chroma_h;
1689  s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1690  s->current_picture.f->linesize[0],
1691  s->h_edge_pos, s->v_edge_pos,
1693  EDGE_TOP | EDGE_BOTTOM);
1694  s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1695  s->current_picture.f->linesize[1],
1696  s->h_edge_pos >> hshift,
1697  s->v_edge_pos >> vshift,
1698  EDGE_WIDTH >> hshift,
1699  EDGE_WIDTH >> vshift,
1700  EDGE_TOP | EDGE_BOTTOM);
1701  s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1702  s->current_picture.f->linesize[2],
1703  s->h_edge_pos >> hshift,
1704  s->v_edge_pos >> vshift,
1705  EDGE_WIDTH >> hshift,
1706  EDGE_WIDTH >> vshift,
1707  EDGE_TOP | EDGE_BOTTOM);
1708  }
1709 
1710  emms_c();
1711 
1712  s->last_pict_type = s->pict_type;
1713  s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1714  if (s->pict_type!= AV_PICTURE_TYPE_B)
1715  s->last_non_b_pict_type = s->pict_type;
1716 
1717 #if FF_API_CODED_FRAME
1719  av_frame_unref(s->avctx->coded_frame);
1720  av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1722 #endif
1723 #if FF_API_ERROR_FRAME
1725  memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1726  sizeof(s->current_picture.encoding_error));
1728 #endif
1729 }
1730 
1731 static void update_noise_reduction(MpegEncContext *s)
1732 {
1733  int intra, i;
1734 
1735  for (intra = 0; intra < 2; intra++) {
1736  if (s->dct_count[intra] > (1 << 16)) {
1737  for (i = 0; i < 64; i++) {
1738  s->dct_error_sum[intra][i] >>= 1;
1739  }
1740  s->dct_count[intra] >>= 1;
1741  }
1742 
1743  for (i = 0; i < 64; i++) {
1744  s->dct_offset[intra][i] = (s->noise_reduction *
1745  s->dct_count[intra] +
1746  s->dct_error_sum[intra][i] / 2) /
1747  (s->dct_error_sum[intra][i] + 1);
1748  }
1749  }
1750 }
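/* Illustrative numbers: with noise_reduction = 256, dct_count[intra] = 4096
 * and dct_error_sum[intra][i] = 65536, the offset becomes
 * (256 * 4096 + 32768) / 65537 = 16; the offset shrinks as the accumulated
 * error for a coefficient grows, and the halving above keeps the running
 * statistics bounded. */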
1751 
1752 static int frame_start(MpegEncContext *s)
1753 {
1754  int ret;
1755 
1756  /* mark & release old frames */
1757  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1758  s->last_picture_ptr != s->next_picture_ptr &&
1759  s->last_picture_ptr->f->buf[0]) {
1760  ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1761  }
1762 
1763  s->current_picture_ptr->f->pict_type = s->pict_type;
1764  s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1765 
1766  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1767  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1768  s->current_picture_ptr)) < 0)
1769  return ret;
1770 
1771  if (s->pict_type != AV_PICTURE_TYPE_B) {
1772  s->last_picture_ptr = s->next_picture_ptr;
1773  if (!s->droppable)
1774  s->next_picture_ptr = s->current_picture_ptr;
1775  }
1776 
1777  if (s->last_picture_ptr) {
1778  ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1779  if (s->last_picture_ptr->f->buf[0] &&
1780  (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1781  s->last_picture_ptr)) < 0)
1782  return ret;
1783  }
1784  if (s->next_picture_ptr) {
1785  ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1786  if (s->next_picture_ptr->f->buf[0] &&
1787  (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1788  s->next_picture_ptr)) < 0)
1789  return ret;
1790  }
1791 
1792  if (s->picture_structure!= PICT_FRAME) {
1793  int i;
1794  for (i = 0; i < 4; i++) {
1795  if (s->picture_structure == PICT_BOTTOM_FIELD) {
1796  s->current_picture.f->data[i] +=
1797  s->current_picture.f->linesize[i];
1798  }
1799  s->current_picture.f->linesize[i] *= 2;
1800  s->last_picture.f->linesize[i] *= 2;
1801  s->next_picture.f->linesize[i] *= 2;
1802  }
1803  }
1804 
1805  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1806  s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1807  s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1808  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1809  s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1810  s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1811  } else {
1812  s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1813  s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1814  }
1815 
1816  if (s->dct_error_sum) {
1817  av_assert2(s->noise_reduction && s->encoding);
1819  }
1820 
1821  return 0;
1822 }
1823 
1824 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1825  const AVFrame *pic_arg, int *got_packet)
1826 {
1827  MpegEncContext *s = avctx->priv_data;
1828  int i, stuffing_count, ret;
1829  int context_count = s->slice_context_count;
1830 
1831  s->vbv_ignore_qmax = 0;
1832 
1833  s->picture_in_gop_number++;
1834 
1835  if (load_input_picture(s, pic_arg) < 0)
1836  return -1;
1837 
1838  if (select_input_picture(s) < 0) {
1839  return -1;
1840  }
1841 
1842  /* output? */
1843  if (s->new_picture.f->data[0]) {
1844  int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1845  int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1846  :
1847  s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1848  if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1849  return ret;
1850  if (s->mb_info) {
1851  s->mb_info_ptr = av_packet_new_side_data(pkt,
1853  s->mb_width*s->mb_height*12);
1854  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1855  }
1856 
1857  for (i = 0; i < context_count; i++) {
1858  int start_y = s->thread_context[i]->start_mb_y;
1859  int end_y = s->thread_context[i]-> end_mb_y;
1860  int h = s->mb_height;
1861  uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1862  uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1863 
1864  init_put_bits(&s->thread_context[i]->pb, start, end - start);
1865  }
1866 
1867  s->pict_type = s->new_picture.f->pict_type;
1868  //emms_c();
1869  ret = frame_start(s);
1870  if (ret < 0)
1871  return ret;
1872 vbv_retry:
1873  ret = encode_picture(s, s->picture_number);
1874  if (growing_buffer) {
1875  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1876  pkt->data = s->pb.buf;
1878  }
1879  if (ret < 0)
1880  return -1;
1881 
1882 #if FF_API_STAT_BITS
1884  avctx->header_bits = s->header_bits;
1885  avctx->mv_bits = s->mv_bits;
1886  avctx->misc_bits = s->misc_bits;
1887  avctx->i_tex_bits = s->i_tex_bits;
1888  avctx->p_tex_bits = s->p_tex_bits;
1889  avctx->i_count = s->i_count;
1890  // FIXME f/b_count in avctx
1891  avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1892  avctx->skip_count = s->skip_count;
1894 #endif
1895 
1896  frame_end(s);
1897 
1898  if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1899  ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
1900 
1901  if (avctx->rc_buffer_size) {
1902  RateControlContext *rcc = &s->rc_context;
1903  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1904  int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1905  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
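 /* Note: (1 << (FF_LAMBDA_SHIFT + 7)) / 139 = 16384 / 139 = 117, roughly one
  * FF_QP2LAMBDA, so each non-hq retry raises lambda by about one qscale step
  * (the inverse of the * 139 >> 14 mapping in update_qscale). */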
1906 
1907  if (put_bits_count(&s->pb) > max_size &&
1908  s->lambda < s->lmax) {
1909  s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1910  (s->qscale + 1) / s->qscale);
1911  if (s->adaptive_quant) {
1912  int i;
1913  for (i = 0; i < s->mb_height * s->mb_stride; i++)
1914  s->lambda_table[i] =
1915  FFMAX(s->lambda_table[i] + min_step,
1916  s->lambda_table[i] * (s->qscale + 1) /
1917  s->qscale);
1918  }
1919  s->mb_skipped = 0; // done in frame_start()
1920  // done in encode_picture() so we must undo it
1921  if (s->pict_type == AV_PICTURE_TYPE_P) {
1922  if (s->flipflop_rounding ||
1923  s->codec_id == AV_CODEC_ID_H263P ||
1924  s->codec_id == AV_CODEC_ID_MPEG4)
1925  s->no_rounding ^= 1;
1926  }
1927  if (s->pict_type != AV_PICTURE_TYPE_B) {
1928  s->time_base = s->last_time_base;
1929  s->last_non_b_time = s->time - s->pp_time;
1930  }
1931  for (i = 0; i < context_count; i++) {
1932  PutBitContext *pb = &s->thread_context[i]->pb;
1933  init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1934  }
1935  s->vbv_ignore_qmax = 1;
1936  av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1937  goto vbv_retry;
1938  }
1939 
1940  av_assert0(s->avctx->rc_max_rate);
1941  }
1942 
1943  if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1944  ff_write_pass1_stats(s);
1945 
1946  for (i = 0; i < 4; i++) {
1947  s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1948  avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1949  }
1950  ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1951  s->current_picture_ptr->encoding_error,
1952  (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1953  s->pict_type);
1954 
1955  if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1956  assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1957  s->misc_bits + s->i_tex_bits +
1958  s->p_tex_bits);
1959  flush_put_bits(&s->pb);
1960  s->frame_bits = put_bits_count(&s->pb);
1961 
1962  stuffing_count = ff_vbv_update(s, s->frame_bits);
1963  s->stuffing_bits = 8*stuffing_count;
1964  if (stuffing_count) {
1965  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1966  stuffing_count + 50) {
1967  av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1968  return -1;
1969  }
1970 
1971  switch (s->codec_id) {
1972  case AV_CODEC_ID_MPEG1VIDEO:
1973  case AV_CODEC_ID_MPEG2VIDEO:
1974  while (stuffing_count--) {
1975  put_bits(&s->pb, 8, 0);
1976  }
1977  break;
1978  case AV_CODEC_ID_MPEG4:
1979  put_bits(&s->pb, 16, 0);
1980  put_bits(&s->pb, 16, 0x1C3);
1981  stuffing_count -= 4;
1982  while (stuffing_count--) {
1983  put_bits(&s->pb, 8, 0xFF);
1984  }
1985  break;
1986  default:
1987  av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1988  }
1989  flush_put_bits(&s->pb);
1990  s->frame_bits = put_bits_count(&s->pb);
1991  }
1992 
1993  /* update MPEG-1/2 vbv_delay for CBR */
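 /* For strict CBR (rc_min_rate == rc_max_rate) the 16-bit vbv_delay field
  * that was already written into the picture header (pointed to by
  * s->vbv_delay_ptr) is patched in place below once the final frame size
  * is known; the value is expressed in 90 kHz clock ticks. */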
1994  if (s->avctx->rc_max_rate &&
1995  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1996  s->out_format == FMT_MPEG1 &&
1997  90000LL * (avctx->rc_buffer_size - 1) <=
1998  s->avctx->rc_max_rate * 0xFFFFLL) {
1999  AVCPBProperties *props;
2000  size_t props_size;
2001 
2002  int vbv_delay, min_delay;
2003  double inbits = s->avctx->rc_max_rate *
2004  av_q2d(s->avctx->time_base);
2005  int minbits = s->frame_bits - 8 *
2006  (s->vbv_delay_ptr - s->pb.buf - 1);
2007  double bits = s->rc_context.buffer_index + minbits - inbits;
2008 
2009  if (bits < 0)
2010  av_log(s->avctx, AV_LOG_ERROR,
2011  "Internal error, negative bits\n");
2012 
2013  av_assert1(s->repeat_first_field == 0);
2014 
2015  vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
2016  min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
2017  s->avctx->rc_max_rate;
2018 
2019  vbv_delay = FFMAX(vbv_delay, min_delay);
2020 
2021  av_assert0(vbv_delay < 0xFFFF);
2022 
2023  s->vbv_delay_ptr[0] &= 0xF8;
2024  s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2025  s->vbv_delay_ptr[1] = vbv_delay >> 5;
2026  s->vbv_delay_ptr[2] &= 0x07;
2027  s->vbv_delay_ptr[2] |= vbv_delay << 3;
2028 
2029  props = av_cpb_properties_alloc(&props_size);
2030  if (!props)
2031  return AVERROR(ENOMEM);
2032  props->vbv_delay = vbv_delay * 300;
2033 
2034  ret = av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
2035  (uint8_t*)props, props_size);
2036  if (ret < 0) {
2037  av_freep(&props);
2038  return ret;
2039  }
2040 
2041 #if FF_API_VBV_DELAY
2042 FF_DISABLE_DEPRECATION_WARNINGS
2043  avctx->vbv_delay = vbv_delay * 300;
2044 FF_ENABLE_DEPRECATION_WARNINGS
2045 #endif
2046  }
2047  s->total_bits += s->frame_bits;
2048 #if FF_API_STAT_BITS
2049 FF_DISABLE_DEPRECATION_WARNINGS
2050  avctx->frame_bits = s->frame_bits;
2051 FF_ENABLE_DEPRECATION_WARNINGS
2052 #endif
2053 
2054 
2055  pkt->pts = s->current_picture.f->pts;
2056  if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2057  if (!s->current_picture.f->coded_picture_number)
2058  pkt->dts = pkt->pts - s->dts_delta;
2059  else
2060  pkt->dts = s->reordered_pts;
2061  s->reordered_pts = pkt->pts;
2062  } else
2063  pkt->dts = pkt->pts;
2064  if (s->current_picture.f->key_frame)
2065  pkt->flags |= AV_PKT_FLAG_KEY;
2066  if (s->mb_info)
2067  av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
2068  } else {
2069  s->frame_bits = 0;
2070  }
2071 
2072  /* release non-reference frames */
2073  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2074  if (!s->picture[i].reference)
2075  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2076  }
2077 
2078  av_assert1((s->frame_bits & 7) == 0);
2079 
2080  pkt->size = s->frame_bits / 8;
2081  *got_packet = !!pkt->size;
2082  return 0;
2083 }
2084 
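/* Zero out a block that contains only a few isolated +-1 coefficients:
 * each such level adds a run-length dependent score from tab[], and if the
 * total stays below the threshold the block is cheaper to drop entirely
 * than to code. A negative threshold additionally allows the DC
 * coefficient to be dropped. */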
2085 static av_always_inline void dct_single_coeff_elimination(MpegEncContext *s,
2086  int n, int threshold)
2087 {
2088  static const char tab[64] = {
2089  3, 2, 2, 1, 1, 1, 1, 1,
2090  1, 1, 1, 1, 1, 1, 1, 1,
2091  1, 1, 1, 1, 1, 1, 1, 1,
2092  0, 0, 0, 0, 0, 0, 0, 0,
2093  0, 0, 0, 0, 0, 0, 0, 0,
2094  0, 0, 0, 0, 0, 0, 0, 0,
2095  0, 0, 0, 0, 0, 0, 0, 0,
2096  0, 0, 0, 0, 0, 0, 0, 0
2097  };
2098  int score = 0;
2099  int run = 0;
2100  int i;
2101  int16_t *block = s->block[n];
2102  const int last_index = s->block_last_index[n];
2103  int skip_dc;
2104 
2105  if (threshold < 0) {
2106  skip_dc = 0;
2107  threshold = -threshold;
2108  } else
2109  skip_dc = 1;
2110 
2111  /* Is everything we could set to zero already zero? */
2112  if (last_index <= skip_dc - 1)
2113  return;
2114 
2115  for (i = 0; i <= last_index; i++) {
2116  const int j = s->intra_scantable.permutated[i];
2117  const int level = FFABS(block[j]);
2118  if (level == 1) {
2119  if (skip_dc && i == 0)
2120  continue;
2121  score += tab[run];
2122  run = 0;
2123  } else if (level > 1) {
2124  return;
2125  } else {
2126  run++;
2127  }
2128  }
2129  if (score >= threshold)
2130  return;
2131  for (i = skip_dc; i <= last_index; i++) {
2132  const int j = s->intra_scantable.permutated[i];
2133  block[j] = 0;
2134  }
2135  if (block[0])
2136  s->block_last_index[n] = 0;
2137  else
2138  s->block_last_index[n] = -1;
2139 }
2140 
2141 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2142  int last_index)
2143 {
2144  int i;
2145  const int maxlevel = s->max_qcoeff;
2146  const int minlevel = s->min_qcoeff;
2147  int overflow = 0;
2148 
2149  if (s->mb_intra) {
2150  i = 1; // skip clipping of intra dc
2151  } else
2152  i = 0;
2153 
2154  for (; i <= last_index; i++) {
2155  const int j = s->intra_scantable.permutated[i];
2156  int level = block[j];
2157 
2158  if (level > maxlevel) {
2159  level = maxlevel;
2160  overflow++;
2161  } else if (level < minlevel) {
2162  level = minlevel;
2163  overflow++;
2164  }
2165 
2166  block[j] = level;
2167  }
2168 
2169  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2170  av_log(s->avctx, AV_LOG_INFO,
2171  "warning, clipping %d dct coefficients to %d..%d\n",
2172  overflow, minlevel, maxlevel);
2173 }
2174 
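/* Per-coefficient visual weights for the noise-shaping quantizer: for each
 * pixel the standard deviation of its 3x3 neighbourhood is estimated via
 * sqrt(count*sqr - sum*sum)/count, giving a local activity measure that
 * dct_quantize_refine() uses to weight the quantization error. */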
2175 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2176 {
2177  int x, y;
2178  // FIXME optimize
2179  for (y = 0; y < 8; y++) {
2180  for (x = 0; x < 8; x++) {
2181  int x2, y2;
2182  int sum = 0;
2183  int sqr = 0;
2184  int count = 0;
2185 
2186  for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2187  for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2188  int v = ptr[x2 + y2 * stride];
2189  sum += v;
2190  sqr += v * v;
2191  count++;
2192  }
2193  }
2194  weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2195  }
2196  }
2197 }
2198 
2199 static av_always_inline void encode_mb_internal(MpegEncContext *s,
2200  int motion_x, int motion_y,
2201  int mb_block_height,
2202  int mb_block_width,
2203  int mb_block_count)
2204 {
2205  int16_t weight[12][64];
2206  int16_t orig[12][64];
2207  const int mb_x = s->mb_x;
2208  const int mb_y = s->mb_y;
2209  int i;
2210  int skip_dct[12];
2211  int dct_offset = s->linesize * 8; // default for progressive frames
2212  int uv_dct_offset = s->uvlinesize * 8;
2213  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2214  ptrdiff_t wrap_y, wrap_c;
2215 
2216  for (i = 0; i < mb_block_count; i++)
2217  skip_dct[i] = s->skipdct;
2218 
2219  if (s->adaptive_quant) {
2220  const int last_qp = s->qscale;
2221  const int mb_xy = mb_x + mb_y * s->mb_stride;
2222 
2223  s->lambda = s->lambda_table[mb_xy];
2224  update_qscale(s);
2225 
2226  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2227  s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2228  s->dquant = s->qscale - last_qp;
2229 
2230  if (s->out_format == FMT_H263) {
2231  s->dquant = av_clip(s->dquant, -2, 2);
2232 
2233  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2234  if (!s->mb_intra) {
2235  if (s->pict_type == AV_PICTURE_TYPE_B) {
2236  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2237  s->dquant = 0;
2238  }
2239  if (s->mv_type == MV_TYPE_8X8)
2240  s->dquant = 0;
2241  }
2242  }
2243  }
2244  }
2245  ff_set_qscale(s, last_qp + s->dquant);
2246  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2247  ff_set_qscale(s, s->qscale + s->dquant);
2248 
2249  wrap_y = s->linesize;
2250  wrap_c = s->uvlinesize;
2251  ptr_y = s->new_picture.f->data[0] +
2252  (mb_y * 16 * wrap_y) + mb_x * 16;
2253  ptr_cb = s->new_picture.f->data[1] +
2254  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2255  ptr_cr = s->new_picture.f->data[2] +
2256  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2257 
2258  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2259  uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2260  int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2261  int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2262  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2263  wrap_y, wrap_y,
2264  16, 16, mb_x * 16, mb_y * 16,
2265  s->width, s->height);
2266  ptr_y = ebuf;
2267  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2268  wrap_c, wrap_c,
2269  mb_block_width, mb_block_height,
2270  mb_x * mb_block_width, mb_y * mb_block_height,
2271  cw, ch);
2272  ptr_cb = ebuf + 16 * wrap_y;
2273  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2274  wrap_c, wrap_c,
2275  mb_block_width, mb_block_height,
2276  mb_x * mb_block_width, mb_y * mb_block_height,
2277  cw, ch);
2278  ptr_cr = ebuf + 16 * wrap_y + 16;
2279  }
2280 
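 /* In both the intra and inter paths below, AV_CODEC_FLAG_INTERLACED_DCT
  * picks between frame and field DCT per macroblock: ildct_cmp scores the
  * block as progressive rows and as interleaved fields (with a small bias
  * of 400 favouring the progressive decision), and if the field ordering
  * scores lower, interlaced_dct is set and the DCT offsets and row strides
  * are adjusted accordingly. */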
2281  if (s->mb_intra) {
2282  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2283  int progressive_score, interlaced_score;
2284 
2285  s->interlaced_dct = 0;
2286  progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2287  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2288  NULL, wrap_y, 8) - 400;
2289 
2290  if (progressive_score > 0) {
2291  interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2292  NULL, wrap_y * 2, 8) +
2293  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2294  NULL, wrap_y * 2, 8);
2295  if (progressive_score > interlaced_score) {
2296  s->interlaced_dct = 1;
2297 
2298  dct_offset = wrap_y;
2299  uv_dct_offset = wrap_c;
2300  wrap_y <<= 1;
2301  if (s->chroma_format == CHROMA_422 ||
2302  s->chroma_format == CHROMA_444)
2303  wrap_c <<= 1;
2304  }
2305  }
2306  }
2307 
2308  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2309  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2310  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2311  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2312 
2313  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2314  skip_dct[4] = 1;
2315  skip_dct[5] = 1;
2316  } else {
2317  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2318  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2319  if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2320  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2321  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2322  } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2323  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2324  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2325  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2326  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2327  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2328  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2329  }
2330  }
2331  } else {
2332  op_pixels_func (*op_pix)[4];
2333  qpel_mc_func (*op_qpix)[16];
2334  uint8_t *dest_y, *dest_cb, *dest_cr;
2335 
2336  dest_y = s->dest[0];
2337  dest_cb = s->dest[1];
2338  dest_cr = s->dest[2];
2339 
2340  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2341  op_pix = s->hdsp.put_pixels_tab;
2342  op_qpix = s->qdsp.put_qpel_pixels_tab;
2343  } else {
2344  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2345  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2346  }
2347 
2348  if (s->mv_dir & MV_DIR_FORWARD) {
2349  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2350  s->last_picture.f->data,
2351  op_pix, op_qpix);
2352  op_pix = s->hdsp.avg_pixels_tab;
2353  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2354  }
2355  if (s->mv_dir & MV_DIR_BACKWARD) {
2356  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2357  s->next_picture.f->data,
2358  op_pix, op_qpix);
2359  }
2360 
2361  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2362  int progressive_score, interlaced_score;
2363 
2364  s->interlaced_dct = 0;
2365  progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2366  s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2367  ptr_y + wrap_y * 8,
2368  wrap_y, 8) - 400;
2369 
2370  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2371  progressive_score -= 400;
2372 
2373  if (progressive_score > 0) {
2374  interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2375  wrap_y * 2, 8) +
2376  s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2377  ptr_y + wrap_y,
2378  wrap_y * 2, 8);
2379 
2380  if (progressive_score > interlaced_score) {
2381  s->interlaced_dct = 1;
2382 
2383  dct_offset = wrap_y;
2384  uv_dct_offset = wrap_c;
2385  wrap_y <<= 1;
2386  if (s->chroma_format == CHROMA_422)
2387  wrap_c <<= 1;
2388  }
2389  }
2390  }
2391 
2392  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2393  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2394  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2395  dest_y + dct_offset, wrap_y);
2396  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2397  dest_y + dct_offset + 8, wrap_y);
2398 
2399  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2400  skip_dct[4] = 1;
2401  skip_dct[5] = 1;
2402  } else {
2403  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2404  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2405  if (!s->chroma_y_shift) { /* 422 */
2406  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2407  dest_cb + uv_dct_offset, wrap_c);
2408  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2409  dest_cr + uv_dct_offset, wrap_c);
2410  }
2411  }
2412  /* pre quantization */
2413  if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2414  2 * s->qscale * s->qscale) {
2415  // FIXME optimize
2416  if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2417  skip_dct[0] = 1;
2418  if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2419  skip_dct[1] = 1;
2420  if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2421  wrap_y, 8) < 20 * s->qscale)
2422  skip_dct[2] = 1;
2423  if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2424  wrap_y, 8) < 20 * s->qscale)
2425  skip_dct[3] = 1;
2426  if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2427  skip_dct[4] = 1;
2428  if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2429  skip_dct[5] = 1;
2430  if (!s->chroma_y_shift) { /* 422 */
2431  if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2432  dest_cb + uv_dct_offset,
2433  wrap_c, 8) < 20 * s->qscale)
2434  skip_dct[6] = 1;
2435  if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2436  dest_cr + uv_dct_offset,
2437  wrap_c, 8) < 20 * s->qscale)
2438  skip_dct[7] = 1;
2439  }
2440  }
2441  }
2442 
2443  if (s->quantizer_noise_shaping) {
2444  if (!skip_dct[0])
2445  get_visual_weight(weight[0], ptr_y , wrap_y);
2446  if (!skip_dct[1])
2447  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2448  if (!skip_dct[2])
2449  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2450  if (!skip_dct[3])
2451  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2452  if (!skip_dct[4])
2453  get_visual_weight(weight[4], ptr_cb , wrap_c);
2454  if (!skip_dct[5])
2455  get_visual_weight(weight[5], ptr_cr , wrap_c);
2456  if (!s->chroma_y_shift) { /* 422 */
2457  if (!skip_dct[6])
2458  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2459  wrap_c);
2460  if (!skip_dct[7])
2461  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2462  wrap_c);
2463  }
2464  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2465  }
2466 
2467  /* DCT & quantize */
2468  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2469  {
2470  for (i = 0; i < mb_block_count; i++) {
2471  if (!skip_dct[i]) {
2472  int overflow;
2473  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2474  // FIXME we could decide to change the quantizer instead of
2475  // clipping
2476  // JS: I don't think that would be a good idea; it could lower
2477  // quality instead of improving it. Only INTRADC clipping
2478  // would justify changing the quantizer.
2479  if (overflow)
2480  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2481  } else
2482  s->block_last_index[i] = -1;
2483  }
2484  if (s->quantizer_noise_shaping) {
2485  for (i = 0; i < mb_block_count; i++) {
2486  if (!skip_dct[i]) {
2487  s->block_last_index[i] =
2488  dct_quantize_refine(s, s->block[i], weight[i],
2489  orig[i], i, s->qscale);
2490  }
2491  }
2492  }
2493 
2494  if (s->luma_elim_threshold && !s->mb_intra)
2495  for (i = 0; i < 4; i++)
2496  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2497  if (s->chroma_elim_threshold && !s->mb_intra)
2498  for (i = 4; i < mb_block_count; i++)
2499  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2500 
2501  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2502  for (i = 0; i < mb_block_count; i++) {
2503  if (s->block_last_index[i] == -1)
2504  s->coded_score[i] = INT_MAX / 256;
2505  }
2506  }
2507  }
2508 
2509  if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2510  s->block_last_index[4] =
2511  s->block_last_index[5] = 0;
2512  s->block[4][0] =
2513  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2514  if (!s->chroma_y_shift) { /* 422 / 444 */
2515  for (i=6; i<12; i++) {
2516  s->block_last_index[i] = 0;
2517  s->block[i][0] = s->block[4][0];
2518  }
2519  }
2520  }
2521 
2522  // non c quantize code returns incorrect block_last_index FIXME
2523  if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2524  for (i = 0; i < mb_block_count; i++) {
2525  int j;
2526  if (s->block_last_index[i] > 0) {
2527  for (j = 63; j > 0; j--) {
2528  if (s->block[i][s->intra_scantable.permutated[j]])
2529  break;
2530  }
2531  s->block_last_index[i] = j;
2532  }
2533  }
2534  }
2535 
2536  /* huffman encode */
2537  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2538  case AV_CODEC_ID_MPEG1VIDEO:
2539  case AV_CODEC_ID_MPEG2VIDEO:
2540  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2541  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2542  break;
2543  case AV_CODEC_ID_MPEG4:
2544  if (CONFIG_MPEG4_ENCODER)
2545  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2546  break;
2547  case AV_CODEC_ID_MSMPEG4V2:
2548  case AV_CODEC_ID_MSMPEG4V3:
2549  case AV_CODEC_ID_WMV1:
2550  if (CONFIG_MSMPEG4_ENCODER)
2551  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2552  break;
2553  case AV_CODEC_ID_WMV2:
2554  if (CONFIG_WMV2_ENCODER)
2555  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2556  break;
2557  case AV_CODEC_ID_H261:
2558  if (CONFIG_H261_ENCODER)
2559  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2560  break;
2561  case AV_CODEC_ID_H263:
2562  case AV_CODEC_ID_H263P:
2563  case AV_CODEC_ID_FLV1:
2564  case AV_CODEC_ID_RV10:
2565  case AV_CODEC_ID_RV20:
2566  if (CONFIG_H263_ENCODER)
2567  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2568  break;
2569  case AV_CODEC_ID_MJPEG:
2570  case AV_CODEC_ID_AMV:
2571  if (CONFIG_MJPEG_ENCODER)
2572  ff_mjpeg_encode_mb(s, s->block);
2573  break;
2574  default:
2575  av_assert1(0);
2576  }
2577 }
2578 
2579 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2580 {
2581  if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2582  else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2583  else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2584 }
2585 
2586 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2587  int i;
2588 
2589  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2590 
2591  /* MPEG-1 */
2592  d->mb_skip_run= s->mb_skip_run;
2593  for(i=0; i<3; i++)
2594  d->last_dc[i] = s->last_dc[i];
2595 
2596  /* statistics */
2597  d->mv_bits= s->mv_bits;
2598  d->i_tex_bits= s->i_tex_bits;
2599  d->p_tex_bits= s->p_tex_bits;
2600  d->i_count= s->i_count;
2601  d->f_count= s->f_count;
2602  d->b_count= s->b_count;
2603  d->skip_count= s->skip_count;
2604  d->misc_bits= s->misc_bits;
2605  d->last_bits= 0;
2606 
2607  d->mb_skipped= 0;
2608  d->qscale= s->qscale;
2609  d->dquant= s->dquant;
2610 
2611  d->esc3_level_length= s->esc3_level_length;
2612 }
2613 
2614 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2615  int i;
2616 
2617  memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2618  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2619 
2620  /* MPEG-1 */
2621  d->mb_skip_run= s->mb_skip_run;
2622  for(i=0; i<3; i++)
2623  d->last_dc[i] = s->last_dc[i];
2624 
2625  /* statistics */
2626  d->mv_bits= s->mv_bits;
2627  d->i_tex_bits= s->i_tex_bits;
2628  d->p_tex_bits= s->p_tex_bits;
2629  d->i_count= s->i_count;
2630  d->f_count= s->f_count;
2631  d->b_count= s->b_count;
2632  d->skip_count= s->skip_count;
2633  d->misc_bits= s->misc_bits;
2634 
2635  d->mb_intra= s->mb_intra;
2636  d->mb_skipped= s->mb_skipped;
2637  d->mv_type= s->mv_type;
2638  d->mv_dir= s->mv_dir;
2639  d->pb= s->pb;
2640  if(s->data_partitioning){
2641  d->pb2= s->pb2;
2642  d->tex_pb= s->tex_pb;
2643  }
2644  d->block= s->block;
2645  for(i=0; i<8; i++)
2646  d->block_last_index[i]= s->block_last_index[i];
2647  d->interlaced_dct= s->interlaced_dct;
2648  d->qscale= s->qscale;
2649 
2650  d->esc3_level_length= s->esc3_level_length;
2651 }
2652 
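/* Encode one macroblock with the given candidate type into one of two
 * scratch bitstreams/block sets, score it by bit count (or, when
 * mb_decision == FF_MB_DECISION_RD, by a rate-distortion cost combining
 * the bit count with the SSE of the reconstruction), and keep it if it
 * beats the best score found so far. */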
2653 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2654  PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2655  int *dmin, int *next_block, int motion_x, int motion_y)
2656 {
2657  int score;
2658  uint8_t *dest_backup[3];
2659 
2660  copy_context_before_encode(s, backup, type);
2661 
2662  s->block= s->blocks[*next_block];
2663  s->pb= pb[*next_block];
2664  if(s->data_partitioning){
2665  s->pb2 = pb2 [*next_block];
2666  s->tex_pb= tex_pb[*next_block];
2667  }
2668 
2669  if(*next_block){
2670  memcpy(dest_backup, s->dest, sizeof(s->dest));
2671  s->dest[0] = s->sc.rd_scratchpad;
2672  s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2673  s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2674  av_assert0(s->linesize >= 32); //FIXME
2675  }
2676 
2677  encode_mb(s, motion_x, motion_y);
2678 
2679  score= put_bits_count(&s->pb);
2680  if(s->data_partitioning){
2681  score+= put_bits_count(&s->pb2);
2682  score+= put_bits_count(&s->tex_pb);
2683  }
2684 
2685  if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2686  ff_mpv_reconstruct_mb(s, s->block);
2687 
2688  score *= s->lambda2;
2689  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2690  }
2691 
2692  if(*next_block){
2693  memcpy(s->dest, dest_backup, sizeof(s->dest));
2694  }
2695 
2696  if(score<*dmin){
2697  *dmin= score;
2698  *next_block^=1;
2699 
2700  copy_context_after_encode(best, s, type);
2701  }
2702 }
2703 
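/* Sum of squared errors between two blocks: 16x16 and 8x8 use the optimized
 * mecc.sse functions, other sizes (partial blocks at the right/bottom
 * picture edge) fall back to the scalar loop over ff_square_tab. */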
2704 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2705  const uint32_t *sq = ff_square_tab + 256;
2706  int acc=0;
2707  int x,y;
2708 
2709  if(w==16 && h==16)
2710  return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2711  else if(w==8 && h==8)
2712  return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2713 
2714  for(y=0; y<h; y++){
2715  for(x=0; x<w; x++){
2716  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2717  }
2718  }
2719 
2720  av_assert2(acc>=0);
2721 
2722  return acc;
2723 }
2724 
2725 static int sse_mb(MpegEncContext *s){
2726  int w= 16;
2727  int h= 16;
2728 
2729  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2730  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2731 
2732  if(w==16 && h==16)
2733  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2734  return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2735  s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2736  s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2737  }else{
2738  return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2739  s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2740  s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2741  }
2742  else
2743  return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2744  +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2745  +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2746 }
2747 
2748 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2749  MpegEncContext *s= *(void**)arg;
2750 
2751 
2752  s->me.pre_pass=1;
2753  s->me.dia_size= s->avctx->pre_dia_size;
2754  s->first_slice_line=1;
2755  for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2756  for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2757  ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2758  }
2759  s->first_slice_line=0;
2760  }
2761 
2762  s->me.pre_pass=0;
2763 
2764  return 0;
2765 }
2766 
2767 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2768  MpegEncContext *s= *(void**)arg;
2769 
2771 
2772  s->me.dia_size= s->avctx->dia_size;
2773  s->first_slice_line=1;
2774  for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2775  s->mb_x=0; //for block init below
2776  ff_init_block_index(s);
2777  for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2778  s->block_index[0]+=2;
2779  s->block_index[1]+=2;
2780  s->block_index[2]+=2;
2781  s->block_index[3]+=2;
2782 
2783  /* compute motion vector & mb_type and store in context */
2784  if(s->pict_type==AV_PICTURE_TYPE_B)
2785  ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2786  else
2787  ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2788  }
2789  s->first_slice_line=0;
2790  }
2791  return 0;
2792 }
2793 
2794 static int mb_var_thread(AVCodecContext *c, void *arg){
2795  MpegEncContext *s= *(void**)arg;
2796  int mb_x, mb_y;
2797 
2799 
2800  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2801  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2802  int xx = mb_x * 16;
2803  int yy = mb_y * 16;
2804  uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2805  int varc;
2806  int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2807 
2808  varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2809  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2810 
2811  s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2812  s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2813  s->me.mb_var_sum_temp += varc;
2814  }
2815  }
2816  return 0;
2817 }
2818 
2819 static void write_slice_end(MpegEncContext *s){
2820  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2821  if(s->partitioned_frame){
2822  ff_mpeg4_merge_partitions(s);
2823  }
2824 
2825  ff_mpeg4_stuffing(&s->pb);
2826  }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2827  ff_mjpeg_encode_stuffing(s);
2828  }
2829 
2830  avpriv_align_put_bits(&s->pb);
2831  flush_put_bits(&s->pb);
2832 
2833  if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2834  s->misc_bits+= get_bits_diff(s);
2835 }
2836 
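/* Append one 12-byte record to the AV_PKT_DATA_H263_MB_INFO side data:
 * bit offset of the macroblock, quantizer, GOB number, macroblock address
 * and the motion vector predictors (the second MV pair is unused). */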
2837 static void write_mb_info(MpegEncContext *s)
2838 {
2839  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2840  int offset = put_bits_count(&s->pb);
2841  int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2842  int gobn = s->mb_y / s->gob_index;
2843  int pred_x, pred_y;
2844  if (CONFIG_H263_ENCODER)
2845  ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2846  bytestream_put_le32(&ptr, offset);
2847  bytestream_put_byte(&ptr, s->qscale);
2848  bytestream_put_byte(&ptr, gobn);
2849  bytestream_put_le16(&ptr, mba);
2850  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2851  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2852  /* 4MV not implemented */
2853  bytestream_put_byte(&ptr, 0); /* hmv2 */
2854  bytestream_put_byte(&ptr, 0); /* vmv2 */
2855 }
2856 
2857 static void update_mb_info(MpegEncContext *s, int startcode)
2858 {
2859  if (!s->mb_info)
2860  return;
2861  if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2862  s->mb_info_size += 12;
2863  s->prev_mb_info = s->last_mb_info;
2864  }
2865  if (startcode) {
2866  s->prev_mb_info = put_bits_count(&s->pb)/8;
2867  /* This might have incremented mb_info_size above, and we return without
2868  * actually writing any info into that slot yet. But in that case,
2869  * this will be called again after the start code has been written,
2870  * and the MB info will be written then. */
2871  return;
2872  }
2873 
2874  s->last_mb_info = put_bits_count(&s->pb)/8;
2875  if (!s->mb_info_size)
2876  s->mb_info_size += 12;
2877  write_mb_info(s);
2878 }
2879 
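/* Grow the shared byte buffer when fewer than 'threshold' bytes remain in
 * the PutBitContext. Only done when there is a single slice context and the
 * bitstream lives in the shared internal byte buffer; the pointers into the
 * old buffer (ptr_lastgob, vbv_delay_ptr) are rebased onto the new one. */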
2880 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2881 {
2882  if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2883  && s->slice_context_count == 1
2884  && s->pb.buf == s->avctx->internal->byte_buffer) {
2885  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2886  int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2887 
2888  uint8_t *new_buffer = NULL;
2889  int new_buffer_size = 0;
2890 
2891  if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2892  av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2893  return AVERROR(ENOMEM);
2894  }
2895 
2896  emms_c();
2897 
2898  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2899  s->avctx->internal->byte_buffer_size + size_increase);
2900  if (!new_buffer)
2901  return AVERROR(ENOMEM);
2902 
2903  memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2904  av_free(s->avctx->internal->byte_buffer);
2905  s->avctx->internal->byte_buffer = new_buffer;
2906  s->avctx->internal->byte_buffer_size = new_buffer_size;
2907  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2908  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2909  s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2910  }
2911  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2912  return AVERROR(EINVAL);
2913  return 0;
2914 }
2915 
2916 static int encode_thread(AVCodecContext *c, void *arg){
2917  MpegEncContext *s= *(void**)arg;
2918  int mb_x, mb_y;
2919  int chr_h= 16>>s->chroma_y_shift;
2920  int i, j;
2921  MpegEncContext best_s = { 0 }, backup_s;
2922  uint8_t bit_buf[2][MAX_MB_BYTES];
2923  uint8_t bit_buf2[2][MAX_MB_BYTES];
2924  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2925  PutBitContext pb[2], pb2[2], tex_pb[2];
2926 
2928 
2929  for(i=0; i<2; i++){
2930  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2931  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2932  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2933  }
2934 
2935  s->last_bits= put_bits_count(&s->pb);
2936  s->mv_bits=0;
2937  s->misc_bits=0;
2938  s->i_tex_bits=0;
2939  s->p_tex_bits=0;
2940  s->i_count=0;
2941  s->f_count=0;
2942  s->b_count=0;
2943  s->skip_count=0;
2944 
2945  for(i=0; i<3; i++){
2946  /* init last dc values */
2947  /* note: quant matrix value (8) is implied here */
2948  s->last_dc[i] = 128 << s->intra_dc_precision;
2949 
2950  s->current_picture.encoding_error[i] = 0;
2951  }
2952  if(s->codec_id==AV_CODEC_ID_AMV){
2953  s->last_dc[0] = 128*8/13;
2954  s->last_dc[1] = 128*8/14;
2955  s->last_dc[2] = 128*8/14;
2956  }
2957  s->mb_skip_run = 0;
2958  memset(s->last_mv, 0, sizeof(s->last_mv));
2959 
2960  s->last_mv_dir = 0;
2961 
2962  switch(s->codec_id){
2963  case AV_CODEC_ID_H263:
2964  case AV_CODEC_ID_H263P:
2965  case AV_CODEC_ID_FLV1:
2966  if (CONFIG_H263_ENCODER)
2967  s->gob_index = H263_GOB_HEIGHT(s->height);
2968  break;
2969  case AV_CODEC_ID_MPEG4:
2970  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2971  ff_mpeg4_init_partitions(s);
2972  break;
2973  }
2974 
2975  s->resync_mb_x=0;
2976  s->resync_mb_y=0;
2977  s->first_slice_line = 1;
2978  s->ptr_lastgob = s->pb.buf;
2979  for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2980  s->mb_x=0;
2981  s->mb_y= mb_y;
2982 
2983  ff_set_qscale(s, s->qscale);
2984  ff_init_block_index(s);
2985 
2986  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2987  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2988  int mb_type= s->mb_type[xy];
2989 // int d;
2990  int dmin= INT_MAX;
2991  int dir;
2992  int size_increase = s->avctx->internal->byte_buffer_size/4
2993  + s->mb_width*MAX_MB_BYTES;
2994 
2995  ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2996  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2997  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2998  return -1;
2999  }
3000  if(s->data_partitioning){
3001  if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3002  || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3003  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3004  return -1;
3005  }
3006  }
3007 
3008  s->mb_x = mb_x;
3009  s->mb_y = mb_y; // moved into loop, can get changed by H.261
3010  ff_update_block_index(s);
3011 
3012  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3013  ff_h261_reorder_mb_index(s);
3014  xy= s->mb_y*s->mb_stride + s->mb_x;
3015  mb_type= s->mb_type[xy];
3016  }
3017 
3018  /* write gob / video packet header */
3019  if(s->rtp_mode){
3020  int current_packet_size, is_gob_start;
3021 
3022  current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3023 
3024  is_gob_start = s->rtp_payload_size &&
3025  current_packet_size >= s->rtp_payload_size &&
3026  mb_y + mb_x > 0;
3027 
3028  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3029 
3030  switch(s->codec_id){
3031  case AV_CODEC_ID_H263:
3032  case AV_CODEC_ID_H263P:
3033  if(!s->h263_slice_structured)
3034  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3035  break;
3036  case AV_CODEC_ID_MPEG2VIDEO:
3037  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3038  case AV_CODEC_ID_MPEG1VIDEO:
3039  if(s->mb_skip_run) is_gob_start=0;
3040  break;
3041  case AV_CODEC_ID_MJPEG:
3042  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3043  break;
3044  }
3045 
3046  if(is_gob_start){
3047  if(s->start_mb_y != mb_y || mb_x!=0){
3048  write_slice_end(s);
3049 
3050  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3051  ff_mpeg4_init_partitions(s);
3052  }
3053  }
3054 
3055  av_assert2((put_bits_count(&s->pb)&7) == 0);
3056  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3057 
3058  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3059  int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3060  int d = 100 / s->error_rate;
3061  if(r % d == 0){
3062  current_packet_size=0;
3063  s->pb.buf_ptr= s->ptr_lastgob;
3064  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3065  }
3066  }
3067 
3068 #if FF_API_RTP_CALLBACK
3069 FF_DISABLE_DEPRECATION_WARNINGS
3070  if (s->avctx->rtp_callback){
3071  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3072  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3073  }
3074 FF_ENABLE_DEPRECATION_WARNINGS
3075 #endif
3076  update_mb_info(s, 1);
3077 
3078  switch(s->codec_id){
3079  case AV_CODEC_ID_MPEG4:
3080  if (CONFIG_MPEG4_ENCODER) {
3081  ff_mpeg4_encode_video_packet_header(s);
3082  ff_mpeg4_clean_buffers(s);
3083  }
3084  break;
3085  case AV_CODEC_ID_MPEG1VIDEO:
3086  case AV_CODEC_ID_MPEG2VIDEO:
3087  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3088  ff_mpeg1_encode_slice_header(s);
3089  ff_mpeg1_clean_buffers(s);
3090  }
3091  break;
3092  case AV_CODEC_ID_H263:
3093  case AV_CODEC_ID_H263P:
3094  if (CONFIG_H263_ENCODER)
3095  ff_h263_encode_gob_header(s, mb_y);
3096  break;
3097  }
3098 
3099  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3100  int bits= put_bits_count(&s->pb);
3101  s->misc_bits+= bits - s->last_bits;
3102  s->last_bits= bits;
3103  }
3104 
3105  s->ptr_lastgob += current_packet_size;
3106  s->first_slice_line=1;
3107  s->resync_mb_x=mb_x;
3108  s->resync_mb_y=mb_y;
3109  }
3110  }
3111 
3112  if( (s->resync_mb_x == s->mb_x)
3113  && s->resync_mb_y+1 == s->mb_y){
3114  s->first_slice_line=0;
3115  }
3116 
3117  s->mb_skipped=0;
3118  s->dquant=0; //only for QP_RD
3119 
3120  update_mb_info(s, 0);
3121 
3122  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3123  int next_block=0;
3124  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3125 
3126  copy_context_before_encode(&backup_s, s, -1);
3127  backup_s.pb= s->pb;
3128  best_s.data_partitioning= s->data_partitioning;
3129  best_s.partitioned_frame= s->partitioned_frame;
3130  if(s->data_partitioning){
3131  backup_s.pb2= s->pb2;
3132  backup_s.tex_pb= s->tex_pb;
3133  }
3134 
3135  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3136  s->mv_dir = MV_DIR_FORWARD;
3137  s->mv_type = MV_TYPE_16X16;
3138  s->mb_intra= 0;
3139  s->mv[0][0][0] = s->p_mv_table[xy][0];
3140  s->mv[0][0][1] = s->p_mv_table[xy][1];
3141  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3142  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3143  }
3144  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3145  s->mv_dir = MV_DIR_FORWARD;
3146  s->mv_type = MV_TYPE_FIELD;
3147  s->mb_intra= 0;
3148  for(i=0; i<2; i++){
3149  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3150  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3151  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3152  }
3153  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3154  &dmin, &next_block, 0, 0);
3155  }
3156  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3157  s->mv_dir = MV_DIR_FORWARD;
3158  s->mv_type = MV_TYPE_16X16;
3159  s->mb_intra= 0;
3160  s->mv[0][0][0] = 0;
3161  s->mv[0][0][1] = 0;
3162  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3163  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3164  }
3165  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3166  s->mv_dir = MV_DIR_FORWARD;
3167  s->mv_type = MV_TYPE_8X8;
3168  s->mb_intra= 0;
3169  for(i=0; i<4; i++){
3170  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3171  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3172  }
3173  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3174  &dmin, &next_block, 0, 0);
3175  }
3176  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3177  s->mv_dir = MV_DIR_FORWARD;
3178  s->mv_type = MV_TYPE_16X16;
3179  s->mb_intra= 0;
3180  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3181  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3182  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3183  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3184  }
3185  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3186  s->mv_dir = MV_DIR_BACKWARD;
3187  s->mv_type = MV_TYPE_16X16;
3188  s->mb_intra= 0;
3189  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3190  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3191  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3192  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3193  }
3194  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3195  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3196  s->mv_type = MV_TYPE_16X16;
3197  s->mb_intra= 0;
3198  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3199  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3200  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3201  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3202  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3203  &dmin, &next_block, 0, 0);
3204  }
3205  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3206  s->mv_dir = MV_DIR_FORWARD;
3207  s->mv_type = MV_TYPE_FIELD;
3208  s->mb_intra= 0;
3209  for(i=0; i<2; i++){
3210  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3211  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3212  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3213  }
3214  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3215  &dmin, &next_block, 0, 0);
3216  }
3217  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3218  s->mv_dir = MV_DIR_BACKWARD;
3219  s->mv_type = MV_TYPE_FIELD;
3220  s->mb_intra= 0;
3221  for(i=0; i<2; i++){
3222  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3223  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3224  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3225  }
3226  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3227  &dmin, &next_block, 0, 0);
3228  }
3229  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3230  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3231  s->mv_type = MV_TYPE_FIELD;
3232  s->mb_intra= 0;
3233  for(dir=0; dir<2; dir++){
3234  for(i=0; i<2; i++){
3235  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3236  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3237  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3238  }
3239  }
3240  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3241  &dmin, &next_block, 0, 0);
3242  }
3243  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3244  s->mv_dir = 0;
3245  s->mv_type = MV_TYPE_16X16;
3246  s->mb_intra= 1;
3247  s->mv[0][0][0] = 0;
3248  s->mv[0][0][1] = 0;
3249  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3250  &dmin, &next_block, 0, 0);
3251  if(s->h263_pred || s->h263_aic){
3252  if(best_s.mb_intra)
3253  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3254  else
3255  ff_clean_intra_table_entries(s); //old mode?
3256  }
3257  }
3258 
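 /* QP_RD: once the best prediction mode is known, also try re-encoding
  * with the quantizer changed by the offsets in dquant_tab[] (+-1 and +-2;
  * only +-2 for B-frames) and keep the dquant with the lowest score; for
  * intra MBs the DC/AC prediction values are saved and restored when a
  * trial is not kept. */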
3259  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3260  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3261  const int last_qp= backup_s.qscale;
3262  int qpi, qp, dc[6];
3263  int16_t ac[6][16];
3264  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3265  static const int dquant_tab[4]={-1,1,-2,2};
3266  int storecoefs = s->mb_intra && s->dc_val[0];
3267 
3268  av_assert2(backup_s.dquant == 0);
3269 
3270  //FIXME intra
3271  s->mv_dir= best_s.mv_dir;
3272  s->mv_type = MV_TYPE_16X16;
3273  s->mb_intra= best_s.mb_intra;
3274  s->mv[0][0][0] = best_s.mv[0][0][0];
3275  s->mv[0][0][1] = best_s.mv[0][0][1];
3276  s->mv[1][0][0] = best_s.mv[1][0][0];
3277  s->mv[1][0][1] = best_s.mv[1][0][1];
3278 
3279  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3280  for(; qpi<4; qpi++){
3281  int dquant= dquant_tab[qpi];
3282  qp= last_qp + dquant;
3283  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3284  continue;
3285  backup_s.dquant= dquant;
3286  if(storecoefs){
3287  for(i=0; i<6; i++){
3288  dc[i]= s->dc_val[0][ s->block_index[i] ];
3289  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3290  }
3291  }
3292 
3293  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3294  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3295  if(best_s.qscale != qp){
3296  if(storecoefs){
3297  for(i=0; i<6; i++){
3298  s->dc_val[0][ s->block_index[i] ]= dc[i];
3299  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3300  }
3301  }
3302  }
3303  }
3304  }
3305  }
3306  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3307  int mx= s->b_direct_mv_table[xy][0];
3308  int my= s->b_direct_mv_table[xy][1];
3309 
3310  backup_s.dquant = 0;
3311  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3312  s->mb_intra= 0;
3313  ff_mpeg4_set_direct_mv(s, mx, my);
3314  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3315  &dmin, &next_block, mx, my);
3316  }
3317  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3318  backup_s.dquant = 0;
3319  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3320  s->mb_intra= 0;
3321  ff_mpeg4_set_direct_mv(s, 0, 0);
3322  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3323  &dmin, &next_block, 0, 0);
3324  }
3325  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3326  int coded=0;
3327  for(i=0; i<6; i++)
3328  coded |= s->block_last_index[i];
3329  if(coded){
3330  int mx,my;
3331  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3332  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3333  mx=my=0; //FIXME find the one we actually used
3334  ff_mpeg4_set_direct_mv(s, mx, my);
3335  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3336  mx= s->mv[1][0][0];
3337  my= s->mv[1][0][1];
3338  }else{
3339  mx= s->mv[0][0][0];
3340  my= s->mv[0][0][1];
3341  }
3342 
3343  s->mv_dir= best_s.mv_dir;
3344  s->mv_type = best_s.mv_type;
3345  s->mb_intra= 0;
3346 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3347  s->mv[0][0][1] = best_s.mv[0][0][1];
3348  s->mv[1][0][0] = best_s.mv[1][0][0];
3349  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3350  backup_s.dquant= 0;
3351  s->skipdct=1;
3352  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3353  &dmin, &next_block, mx, my);
3354  s->skipdct=0;
3355  }
3356  }
3357 
3358  s->current_picture.qscale_table[xy] = best_s.qscale;
3359 
3360  copy_context_after_encode(s, &best_s, -1);
3361 
3362  pb_bits_count= put_bits_count(&s->pb);
3363  flush_put_bits(&s->pb);
3364  avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3365  s->pb= backup_s.pb;
3366 
3367  if(s->data_partitioning){
3368  pb2_bits_count= put_bits_count(&s->pb2);
3369  flush_put_bits(&s->pb2);
3370  avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3371  s->pb2= backup_s.pb2;
3372 
3373  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3374  flush_put_bits(&s->tex_pb);
3375  avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3376  s->tex_pb= backup_s.tex_pb;
3377  }
3378  s->last_bits= put_bits_count(&s->pb);
3379 
3380  if (CONFIG_H263_ENCODER &&
3381  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3382  ff_h263_update_motion_val(s);
3383 
3384  if(next_block==0){ //FIXME 16 vs linesize16
3385  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3386  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3387  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3388  }
3389 
3390  if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3391  ff_mpv_reconstruct_mb(s, s->block);
3392  } else {
3393  int motion_x = 0, motion_y = 0;
3394  s->mv_type=MV_TYPE_16X16;
3395  // only one MB-Type possible
3396 
3397  switch(mb_type){
3398  case CANDIDATE_MB_TYPE_INTRA:
3399  s->mv_dir = 0;
3400  s->mb_intra= 1;
3401  motion_x= s->mv[0][0][0] = 0;
3402  motion_y= s->mv[0][0][1] = 0;
3403  break;
3404  case CANDIDATE_MB_TYPE_INTER:
3405  s->mv_dir = MV_DIR_FORWARD;
3406  s->mb_intra= 0;
3407  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3408  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3409  break;
3410  case CANDIDATE_MB_TYPE_INTER_I:
3411  s->mv_dir = MV_DIR_FORWARD;
3412  s->mv_type = MV_TYPE_FIELD;
3413  s->mb_intra= 0;
3414  for(i=0; i<2; i++){
3415  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3416  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3417  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3418  }
3419  break;
3420  case CANDIDATE_MB_TYPE_INTER4V:
3421  s->mv_dir = MV_DIR_FORWARD;
3422  s->mv_type = MV_TYPE_8X8;
3423  s->mb_intra= 0;
3424  for(i=0; i<4; i++){
3425  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3426  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3427  }
3428  break;
3429  case CANDIDATE_MB_TYPE_DIRECT:
3430  if (CONFIG_MPEG4_ENCODER) {
3431  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3432  s->mb_intra= 0;
3433  motion_x=s->b_direct_mv_table[xy][0];
3434  motion_y=s->b_direct_mv_table[xy][1];
3435  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3436  }
3437  break;
3438  case CANDIDATE_MB_TYPE_DIRECT0:
3439  if (CONFIG_MPEG4_ENCODER) {
3440  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3441  s->mb_intra= 0;
3442  ff_mpeg4_set_direct_mv(s, 0, 0);
3443  }
3444  break;
3445  case CANDIDATE_MB_TYPE_BIDIR:
3446  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3447  s->mb_intra= 0;
3448  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3449  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3450  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3451  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3452  break;
3453  case CANDIDATE_MB_TYPE_BACKWARD:
3454  s->mv_dir = MV_DIR_BACKWARD;
3455  s->mb_intra= 0;
3456  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3457  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3458  break;
3459  case CANDIDATE_MB_TYPE_FORWARD:
3460  s->mv_dir = MV_DIR_FORWARD;
3461  s->mb_intra= 0;
3462  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3463  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3464  break;
3465  case CANDIDATE_MB_TYPE_FORWARD_I:
3466  s->mv_dir = MV_DIR_FORWARD;
3467  s->mv_type = MV_TYPE_FIELD;
3468  s->mb_intra= 0;
3469  for(i=0; i<2; i++){
3470  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3471  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3472  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3473  }
3474  break;
3475  case CANDIDATE_MB_TYPE_BACKWARD_I:
3476  s->mv_dir = MV_DIR_BACKWARD;
3477  s->mv_type = MV_TYPE_FIELD;
3478  s->mb_intra= 0;
3479  for(i=0; i<2; i++){
3480  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3481  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3482  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3483  }
3484  break;
3485  case CANDIDATE_MB_TYPE_BIDIR_I:
3486  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3487  s->mv_type = MV_TYPE_FIELD;
3488  s->mb_intra= 0;
3489  for(dir=0; dir<2; dir++){
3490  for(i=0; i<2; i++){
3491  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3492  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3493  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3494  }
3495  }
3496  break;
3497  default:
3498  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3499  }
3500 
3501  encode_mb(s, motion_x, motion_y);
3502 
3503  // RAL: Update last macroblock type
3504  s->last_mv_dir = s->mv_dir;
3505 
3506  if (CONFIG_H263_ENCODER &&
3507  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3508  ff_h263_update_motion_val(s);
3509 
3510  ff_mpv_reconstruct_mb(s, s->block);
3511  }
3512 
3513  /* clean the MV table in IPS frames for direct mode in B-frames */
3514  if(s->mb_intra /* && I,P,S_TYPE */){
3515  s->p_mv_table[xy][0]=0;
3516  s->p_mv_table[xy][1]=0;
3517  }
3518 
3519  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3520  int w= 16;
3521  int h= 16;
3522 
3523  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3524  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3525 
3526  s->current_picture.encoding_error[0] += sse(
3527  s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3528  s->dest[0], w, h, s->linesize);
3529  s->current_picture.encoding_error[1] += sse(
3530  s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3531  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3532  s->current_picture.encoding_error[2] += sse(
3533  s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3534  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3535  }
3536  if(s->loop_filter){
3537  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3538  ff_h263_loop_filter(s);
3539  }
3540  ff_dlog(s->avctx, "MB %d %d bits\n",
3541  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3542  }
3543  }
3544 
3545  //not beautiful here but we must write it before flushing so it has to be here
3546  if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3547  ff_msmpeg4_encode_ext_header(s);
3548 
3549  write_slice_end(s);
3550 
3551 #if FF_API_RTP_CALLBACK
3552 FF_DISABLE_DEPRECATION_WARNINGS
3553  /* Send the last GOB if RTP */
3554  if (s->avctx->rtp_callback) {
3555  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3556  int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3557  /* Call the RTP callback to send the last GOB */
3558  emms_c();
3559  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3560  }
3561 FF_ENABLE_DEPRECATION_WARNINGS
3562 #endif
3563 
3564  return 0;
3565 }
3566 
3567 #define MERGE(field) dst->field += src->field; src->field=0
3568 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3569  MERGE(me.scene_change_score);
3570  MERGE(me.mc_mb_var_sum_temp);
3571  MERGE(me.mb_var_sum_temp);
3572 }
3573 
3574 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3575  int i;
3576 
3577  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3578  MERGE(dct_count[1]);
3579  MERGE(mv_bits);
3580  MERGE(i_tex_bits);
3581  MERGE(p_tex_bits);
3582  MERGE(i_count);
3583  MERGE(f_count);
3584  MERGE(b_count);
3585  MERGE(skip_count);
3586  MERGE(misc_bits);
3587  MERGE(er.error_count);
3588  MERGE(padding_bug_score);
3589  MERGE(current_picture.encoding_error[0]);
3590  MERGE(current_picture.encoding_error[1]);
3591  MERGE(current_picture.encoding_error[2]);
3592 
3593  if (dst->noise_reduction){
3594  for(i=0; i<64; i++){
3595  MERGE(dct_error_sum[0][i]);
3596  MERGE(dct_error_sum[1][i]);
3597  }
3598  }
3599 
3600  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3601  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3602  avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3603  flush_put_bits(&dst->pb);
3604 }
3605 
3606 static int estimate_qp(MpegEncContext *s, int dry_run){
3607  if (s->next_lambda){
3608  s->current_picture_ptr->f->quality =
3609  s->current_picture.f->quality = s->next_lambda;
3610  if(!dry_run) s->next_lambda= 0;
3611  } else if (!s->fixed_qscale) {
3612  int quality = ff_rate_estimate_qscale(s, dry_run);
3613  s->current_picture_ptr->f->quality =
3614  s->current_picture.f->quality = quality;
3615  if (s->current_picture.f->quality < 0)
3616  return -1;
3617  }
3618 
3619  if(s->adaptive_quant){
3620  switch(s->codec_id){
3621  case AV_CODEC_ID_MPEG4:
3622  if (CONFIG_MPEG4_ENCODER)
3623  ff_clean_mpeg4_qscales(s);
3624  break;
3625  case AV_CODEC_ID_H263:
3626  case AV_CODEC_ID_H263P:
3627  case AV_CODEC_ID_FLV1:
3628  if (CONFIG_H263_ENCODER)
3629  ff_clean_h263_qscales(s);
3630  break;
3631  default:
3632  ff_init_qscale_tab(s);
3633  }
3634 
3635  s->lambda= s->lambda_table[0];
3636  //FIXME broken
3637  }else
3638  s->lambda = s->current_picture.f->quality;
3639  update_qscale(s);
3640  return 0;
3641 }
3642 
3643 /* must be called before writing the header */
3644 static void set_frame_distances(MpegEncContext * s){
3645  av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3646  s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3647 
3648  if(s->pict_type==AV_PICTURE_TYPE_B){
3649  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3650  av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3651  }else{
3652  s->pp_time= s->time - s->last_non_b_time;
3653  s->last_non_b_time= s->time;
3654  av_assert1(s->picture_number==0 || s->pp_time > 0);
3655  }
3656 }
3657 
3658 static int encode_picture(MpegEncContext *s, int picture_number)
3659 {
3660  int i, ret;
3661  int bits;
3662  int context_count = s->slice_context_count;
3663 
3664  s->picture_number = picture_number;
3665 
3666  /* Reset the average MB variance */
3667  s->me.mb_var_sum_temp =
3668  s->me.mc_mb_var_sum_temp = 0;
3669 
3670  /* we need to initialize some time vars before we can encode B-frames */
3671  // RAL: Condition added for MPEG1VIDEO
3672  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3673  set_frame_distances(s);
3674  if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3675  ff_set_mpeg4_time(s);
3676 
3677  s->me.scene_change_score=0;
3678 
3679 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3680 
3681  if(s->pict_type==AV_PICTURE_TYPE_I){
3682  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3683  else s->no_rounding=0;
3684  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3685  if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3686  s->no_rounding ^= 1;
3687  }
3688 
3689  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3690  if (estimate_qp(s,1) < 0)
3691  return -1;
3692  ff_get_2pass_fcode(s);
3693  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3694  if(s->pict_type==AV_PICTURE_TYPE_B)
3695  s->lambda= s->last_lambda_for[s->pict_type];
3696  else
3697  s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3698  update_qscale(s);
3699  }
3700 
3701  if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3702  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3703  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3704  s->q_chroma_intra_matrix = s->q_intra_matrix;
3705  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3706  }
3707 
3708  s->mb_intra=0; //for the rate distortion & bit compare functions
3709  for(i=1; i<context_count; i++){
3710  ret = ff_update_duplicate_context(s->thread_context[i], s);
3711  if (ret < 0)
3712  return ret;
3713  }
3714 
3715  if(ff_init_me(s)<0)
3716  return -1;
3717 
3718  /* Estimate motion for every MB */
3719  if(s->pict_type != AV_PICTURE_TYPE_I){
3720  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3721  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3722  if (s->pict_type != AV_PICTURE_TYPE_B) {
3723  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3724  s->me_pre == 2) {
3725  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3726  }
3727  }
3728 
3729  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3730  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3731  /* I-Frame */
3732  for(i=0; i<s->mb_stride*s->mb_height; i++)
3733  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3734 
3735  if(!s->fixed_qscale){
3736  /* finding spatial complexity for I-frame rate control */
3737  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3738  }
3739  }
3740  for(i=1; i<context_count; i++){
3741  merge_context_after_me(s, s->thread_context[i]);
3742  }
3743  s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3744  s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
3745  emms_c();
3746 
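/* Force intra coding when motion estimation reports a scene change above scenechange_threshold. */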
3747  if (s->me.scene_change_score > s->scenechange_threshold &&
3748  s->pict_type == AV_PICTURE_TYPE_P) {
3749  s->pict_type= AV_PICTURE_TYPE_I;
3750  for(i=0; i<s->mb_stride*s->mb_height; i++)
3751  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3752  if(s->msmpeg4_version >= 3)
3753  s->no_rounding=1;
3754  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3755  s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3756  }
3757 
3758  if(!s->umvplus){
3759  if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3760  s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3761 
3762  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3763  int a,b;
3764  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3765  b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3766  s->f_code= FFMAX3(s->f_code, a, b);
3767  }
3768 
3769  ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
3770  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3771  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3772  int j;
3773  for(i=0; i<2; i++){
3774  for(j=0; j<2; j++)
3775  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3776  s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3777  }
3778  }
3779  }
3780 
3781  if(s->pict_type==AV_PICTURE_TYPE_B){
3782  int a, b;
3783 
3784  a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3785  b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3786  s->f_code = FFMAX(a, b);
3787 
3788  a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3789  b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3790  s->b_code = FFMAX(a, b);
3791 
3792  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3793  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3794  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3795  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3796  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3797  int dir, j;
3798  for(dir=0; dir<2; dir++){
3799  for(i=0; i<2; i++){
3800  for(j=0; j<2; j++){
3801  int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3802  : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3803  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3804  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3805  }
3806  }
3807  }
3808  }
3809  }
3810  }
3811 
3812  if (estimate_qp(s, 0) < 0)
3813  return -1;
3814 
3815  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3816  s->pict_type == AV_PICTURE_TYPE_I &&
3817  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3818  s->qscale= 3; //reduce clipping problems
3819 
3820  if (s->out_format == FMT_MJPEG) {
3821  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3822  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3823 
3824  if (s->avctx->intra_matrix) {
3825  chroma_matrix =
3826  luma_matrix = s->avctx->intra_matrix;
3827  }
3828  if (s->avctx->chroma_intra_matrix)
3829  chroma_matrix = s->avctx->chroma_intra_matrix;
3830 
3831  /* for mjpeg, we do include qscale in the matrix */
3832  for(i=1;i<64;i++){
3833  int j = s->idsp.idct_permutation[i];
3834 
3835  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3836  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3837  }
3838  s->y_dc_scale_table=
3839  s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3840  s->chroma_intra_matrix[0] =
3841  s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3842  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3843  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3844  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3845  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3846  s->qscale= 8;
3847  }
3848  if(s->codec_id == AV_CODEC_ID_AMV){
3849  static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3850  static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3851  for(i=1;i<64;i++){
3852  int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3853 
3854  s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3855  s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3856  }
3857  s->y_dc_scale_table= y;
3858  s->c_dc_scale_table= c;
3859  s->intra_matrix[0] = 13;
3860  s->chroma_intra_matrix[0] = 14;
3861  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3862  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3863  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3864  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3865  s->qscale= 8;
3866  }
3867 
3868  //FIXME var duplication
3869  s->current_picture_ptr->f->key_frame =
3870  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3871  s->current_picture_ptr->f->pict_type =
3872  s->current_picture.f->pict_type = s->pict_type;
3873 
3874  if (s->current_picture.f->key_frame)
3875  s->picture_in_gop_number=0;
3876 
3877  s->mb_x = s->mb_y = 0;
3878  s->last_bits= put_bits_count(&s->pb);
3879  switch(s->out_format) {
3880  case FMT_MJPEG:
3881  if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3882  ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3883  s->pred, s->intra_matrix, s->chroma_intra_matrix);
3884  break;
3885  case FMT_H261:
3886  if (CONFIG_H261_ENCODER)
3887  ff_h261_encode_picture_header(s, picture_number);
3888  break;
3889  case FMT_H263:
3890  if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3891  ff_wmv2_encode_picture_header(s, picture_number);
3892  else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3893  ff_msmpeg4_encode_picture_header(s, picture_number);
3894  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3895  ret = ff_mpeg4_encode_picture_header(s, picture_number);
3896  if (ret < 0)
3897  return ret;
3898  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3899  ret = ff_rv10_encode_picture_header(s, picture_number);
3900  if (ret < 0)
3901  return ret;
3902  }
3903  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3904  ff_rv20_encode_picture_header(s, picture_number);
3905  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3906  ff_flv_encode_picture_header(s, picture_number);
3907  else if (CONFIG_H263_ENCODER)
3908  ff_h263_encode_picture_header(s, picture_number);
3909  break;
3910  case FMT_MPEG1:
3911  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3912  ff_mpeg1_encode_picture_header(s, picture_number);
3913  break;
3914  default:
3915  av_assert0(0);
3916  }
3917  bits= put_bits_count(&s->pb);
3918  s->header_bits= bits - s->last_bits;
3919 
3920  for(i=1; i<context_count; i++){
3921  update_duplicate_context_after_me(s->thread_context[i], s);
3922  }
3923  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3924  for(i=1; i<context_count; i++){
3925  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3926  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3927  merge_context_after_encode(s, s->thread_context[i]);
3928  }
3929  emms_c();
3930  return 0;
3931 }
3932 
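/*
 * DCT-domain noise reduction: accumulate the magnitude of each coefficient
 * position in dct_error_sum and shrink new coefficients towards zero by the
 * corresponding dct_offset, clamping at zero so the sign never flips.
 */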
3933 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3934  const int intra= s->mb_intra;
3935  int i;
3936 
3937  s->dct_count[intra]++;
3938 
3939  for(i=0; i<64; i++){
3940  int level= block[i];
3941 
3942  if(level){
3943  if(level>0){
3944  s->dct_error_sum[intra][i] += level;
3945  level -= s->dct_offset[intra][i];
3946  if(level<0) level=0;
3947  }else{
3948  s->dct_error_sum[intra][i] -= level;
3949  level += s->dct_offset[intra][i];
3950  if(level>0) level=0;
3951  }
3952  block[i]= level;
3953  }
3954  }
3955 }
3956 
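/*
 * Rate-distortion optimized ("trellis") quantization: for every scan position
 * keep up to two candidate quantized levels, then run a dynamic-programming
 * search over the surviving run/level paths, scoring each candidate with
 * distortion + lambda * VLC bit cost (length[]/last_length[] tables,
 * esc_length for escape codes) and back-tracking the cheapest path.
 */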
3957 static int dct_quantize_trellis_c(MpegEncContext *s,
3958  int16_t *block, int n,
3959  int qscale, int *overflow){
3960  const int *qmat;
3961  const uint16_t *matrix;
3962  const uint8_t *scantable;
3963  const uint8_t *perm_scantable;
3964  int max=0;
3965  unsigned int threshold1, threshold2;
3966  int bias=0;
3967  int run_tab[65];
3968  int level_tab[65];
3969  int score_tab[65];
3970  int survivor[65];
3971  int survivor_count;
3972  int last_run=0;
3973  int last_level=0;
3974  int last_score= 0;
3975  int last_i;
3976  int coeff[2][64];
3977  int coeff_count[64];
3978  int qmul, qadd, start_i, last_non_zero, i, dc;
3979  const int esc_length= s->ac_esc_length;
3980  uint8_t * length;
3981  uint8_t * last_length;
3982  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3983  int mpeg2_qscale;
3984 
3985  s->fdsp.fdct(block);
3986 
3987  if(s->dct_error_sum)
3988  s->denoise_dct(s, block);
3989  qmul= qscale*16;
3990  qadd= ((qscale-1)|1)*8;
3991 
3992  if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3993  else mpeg2_qscale = qscale << 1;
3994 
3995  if (s->mb_intra) {
3996  int q;
3997  scantable= s->intra_scantable.scantable;
3998  perm_scantable= s->intra_scantable.permutated;
3999  if (!s->h263_aic) {
4000  if (n < 4)
4001  q = s->y_dc_scale;
4002  else
4003  q = s->c_dc_scale;
4004  q = q << 3;
4005  } else{
4006  /* For AIC we skip quant/dequant of INTRADC */
4007  q = 1 << 3;
4008  qadd=0;
4009  }
4010 
4011  /* note: block[0] is assumed to be positive */
4012  block[0] = (block[0] + (q >> 1)) / q;
4013  start_i = 1;
4014  last_non_zero = 0;
4015  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4016  matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4017  if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4018  bias= 1<<(QMAT_SHIFT-1);
4019 
4020  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4021  length = s->intra_chroma_ac_vlc_length;
4022  last_length= s->intra_chroma_ac_vlc_last_length;
4023  } else {
4024  length = s->intra_ac_vlc_length;
4025  last_length= s->intra_ac_vlc_last_length;
4026  }
4027  } else {
4028  scantable= s->inter_scantable.scantable;
4029  perm_scantable= s->inter_scantable.permutated;
4030  start_i = 0;
4031  last_non_zero = -1;
4032  qmat = s->q_inter_matrix[qscale];
4033  matrix = s->inter_matrix;
4034  length = s->inter_ac_vlc_length;
4035  last_length= s->inter_ac_vlc_last_length;
4036  }
4037  last_i= start_i;
4038 
4039  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4040  threshold2= (threshold1<<1);
4041 
4042  for(i=63; i>=start_i; i--) {
4043  const int j = scantable[i];
4044  int level = block[j] * qmat[j];
4045 
4046  if(((unsigned)(level+threshold1))>threshold2){
4047  last_non_zero = i;
4048  break;
4049  }
4050  }
4051 
4052  for(i=start_i; i<=last_non_zero; i++) {
4053  const int j = scantable[i];
4054  int level = block[j] * qmat[j];
4055 
4056 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4057 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4058  if(((unsigned)(level+threshold1))>threshold2){
4059  if(level>0){
4060  level= (bias + level)>>QMAT_SHIFT;
4061  coeff[0][i]= level;
4062  coeff[1][i]= level-1;
4063 // coeff[2][k]= level-2;
4064  }else{
4065  level= (bias - level)>>QMAT_SHIFT;
4066  coeff[0][i]= -level;
4067  coeff[1][i]= -level+1;
4068 // coeff[2][k]= -level+2;
4069  }
4070  coeff_count[i]= FFMIN(level, 2);
4071  av_assert2(coeff_count[i]);
4072  max |=level;
4073  }else{
4074  coeff[0][i]= (level>>31)|1;
4075  coeff_count[i]= 1;
4076  }
4077  }
4078 
4079  *overflow= s->max_qcoeff < max; //overflow might have happened
4080 
4081  if(last_non_zero < start_i){
4082  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4083  return last_non_zero;
4084  }
4085 
4086  score_tab[start_i]= 0;
4087  survivor[0]= start_i;
4088  survivor_count= 1;
4089 
4090  for(i=start_i; i<=last_non_zero; i++){
4091  int level_index, j, zero_distortion;
4092  int dct_coeff= FFABS(block[ scantable[i] ]);
4093  int best_score=256*256*256*120;
4094 
4095  if (s->fdsp.fdct == ff_fdct_ifast)
4096  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4097  zero_distortion= dct_coeff*dct_coeff;
4098 
4099  for(level_index=0; level_index < coeff_count[i]; level_index++){
4100  int distortion;
4101  int level= coeff[level_index][i];
4102  const int alevel= FFABS(level);
4103  int unquant_coeff;
4104 
4105  av_assert2(level);
4106 
4107  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4108  unquant_coeff= alevel*qmul + qadd;
4109  } else if(s->out_format == FMT_MJPEG) {
4110  j = s->idsp.idct_permutation[scantable[i]];
4111  unquant_coeff = alevel * matrix[j] * 8;
4112  }else{ // MPEG-1
4113  j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4114  if(s->mb_intra){
4115  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4116  unquant_coeff = (unquant_coeff - 1) | 1;
4117  }else{
4118  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4119  unquant_coeff = (unquant_coeff - 1) | 1;
4120  }
4121  unquant_coeff<<= 3;
4122  }
4123 
4124  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4125  level+=64;
4126  if((level&(~127)) == 0){
4127  for(j=survivor_count-1; j>=0; j--){
4128  int run= i - survivor[j];
4129  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4130  score += score_tab[i-run];
4131 
4132  if(score < best_score){
4133  best_score= score;
4134  run_tab[i+1]= run;
4135  level_tab[i+1]= level-64;
4136  }
4137  }
4138 
4139  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4140  for(j=survivor_count-1; j>=0; j--){
4141  int run= i - survivor[j];
4142  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4143  score += score_tab[i-run];
4144  if(score < last_score){
4145  last_score= score;
4146  last_run= run;
4147  last_level= level-64;
4148  last_i= i+1;
4149  }
4150  }
4151  }
4152  }else{
4153  distortion += esc_length*lambda;
4154  for(j=survivor_count-1; j>=0; j--){
4155  int run= i - survivor[j];
4156  int score= distortion + score_tab[i-run];
4157 
4158  if(score < best_score){
4159  best_score= score;
4160  run_tab[i+1]= run;
4161  level_tab[i+1]= level-64;
4162  }
4163  }
4164 
4165  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4166  for(j=survivor_count-1; j>=0; j--){