FFmpeg
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include "config_components.h"
35 
36 #include <stdint.h>
37 
38 #include "libavutil/emms.h"
39 #include "libavutil/internal.h"
40 #include "libavutil/intmath.h"
41 #include "libavutil/mathematics.h"
42 #include "libavutil/mem.h"
43 #include "libavutil/mem_internal.h"
44 #include "libavutil/opt.h"
45 #include "libavutil/thread.h"
46 #include "avcodec.h"
47 #include "encode.h"
48 #include "idctdsp.h"
49 #include "mpeg12codecs.h"
50 #include "mpeg12data.h"
51 #include "mpeg12enc.h"
52 #include "mpegvideo.h"
53 #include "mpegvideodata.h"
54 #include "mpegvideoenc.h"
55 #include "h261enc.h"
56 #include "h263.h"
57 #include "h263data.h"
58 #include "h263enc.h"
59 #include "mjpegenc_common.h"
60 #include "mathops.h"
61 #include "mpegutils.h"
62 #include "mjpegenc.h"
63 #include "speedhqenc.h"
64 #include "msmpeg4enc.h"
65 #include "pixblockdsp.h"
66 #include "qpeldsp.h"
67 #include "faandct.h"
68 #include "aandcttab.h"
69 #include "flvenc.h"
70 #include "mpeg4video.h"
71 #include "mpeg4videodata.h"
72 #include "mpeg4videoenc.h"
73 #include "internal.h"
74 #include "bytestream.h"
75 #include "wmv2enc.h"
76 #include "rv10enc.h"
77 #include "packet_internal.h"
78 #include "libavutil/refstruct.h"
79 #include <limits.h>
80 #include "sp5x.h"
81 
82 #define QUANT_BIAS_SHIFT 8
83 
84 #define QMAT_SHIFT_MMX 16
85 #define QMAT_SHIFT 21
86 
87 static int encode_picture(MpegEncContext *s, const AVPacket *pkt);
88 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
89 static int sse_mb(MpegEncContext *s);
90 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
91 static int dct_quantize_c(MpegEncContext *s,
92  int16_t *block, int n,
93  int qscale, int *overflow);
94 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
95 
96 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
97 
98 static const AVOption mpv_generic_options[] = {
101  { NULL },
102 };
103 
105  .class_name = "generic mpegvideo encoder",
106  .item_name = av_default_item_name,
107  .option = mpv_generic_options,
108  .version = LIBAVUTIL_VERSION_INT,
109 };
110 
111 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
112  uint16_t (*qmat16)[2][64],
113  const uint16_t *quant_matrix,
114  int bias, int qmin, int qmax, int intra)
115 {
116  FDCTDSPContext *fdsp = &s->fdsp;
117  int qscale;
118  int shift = 0;
119 
120  for (qscale = qmin; qscale <= qmax; qscale++) {
121  int i;
122  int qscale2;
123 
124  if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
125  else qscale2 = qscale << 1;
126 
127  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
128 #if CONFIG_FAANDCT
129  fdsp->fdct == ff_faandct ||
130 #endif /* CONFIG_FAANDCT */
132  for (i = 0; i < 64; i++) {
133  const int j = s->idsp.idct_permutation[i];
134  int64_t den = (int64_t) qscale2 * quant_matrix[j];
135  /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
136  * Assume x = qscale2 * quant_matrix[j]
137  * 1 <= x <= 28560
138  * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
139  * 4194304 >= (1 << 22) / (x) >= 146 */
140 
141  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
142  }
143  } else if (fdsp->fdct == ff_fdct_ifast) {
144  for (i = 0; i < 64; i++) {
145  const int j = s->idsp.idct_permutation[i];
146  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
147  /* 1247 * 1 * 1 <= ff_aanscales[i] * qscale2 * quant_matrix[j] <= 31521 * 112 * 255
148  * Assume x = ff_aanscales[i] * qscale2 * quant_matrix[j]
149  * 1247 <= x <= 900239760
150  * (1 << 36) / 1247 >= (1 << 36) / (x) >= (1 << 36) / 900239760
151  * 55107840 >= (1 << 36) / (x) >= 76 */
152 
153  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
154  }
155  } else {
156  for (i = 0; i < 64; i++) {
157  const int j = s->idsp.idct_permutation[i];
158  int64_t den = (int64_t) qscale2 * quant_matrix[j];
159  /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
160  * Assume x = qscale2 * quant_matrix[j]
161  * 1 <= x <= 28560
162  * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
163  * 4194304 >= (1 << 22) / (x) >= 146
164  *
165  * 1 <= x <= 28560
166  * (1 << 17) / 1 >= (1 << 17) / (x) >= (1 << 17) / 28560
167  * 131072 >= (1 << 17) / (x) >= 4 */
168 
169  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
170  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
171 
172  if (qmat16[qscale][0][i] == 0 ||
173  qmat16[qscale][0][i] == 128 * 256)
174  qmat16[qscale][0][i] = 128 * 256 - 1;
175  qmat16[qscale][1][i] =
176  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
177  qmat16[qscale][0][i]);
178  }
179  }
180 
181  for (i = intra; i < 64; i++) {
182  int64_t max = 8191;
183  if (fdsp->fdct == ff_fdct_ifast) {
184  max = (8191LL * ff_aanscales[i]) >> 14;
185  }
186  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
187  shift++;
188  }
189  }
190  }
191  if (shift) {
192  av_log(s->avctx, AV_LOG_INFO,
193  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
194  QMAT_SHIFT - shift);
195  }
196 }
197 
/* Derive the per-picture quantizer (s->qscale) and squared lambda
 * (s->lambda2) from the current rate-control lambda (s->lambda). */
198 static inline void update_qscale(MpegEncContext *s)
199 {
/* NOTE(review): the "&& 0" makes this branch dead code — the search over
 * the MPEG-2 non-linear qscale table is disabled and the linear mapping
 * below is always used. Presumably intentional; confirm before removing. */
200  if (s->q_scale_type == 1 && 0) {
201  int i;
202  int bestdiff=INT_MAX;
203  int best = 1;
204 
/* Find the non-linear qscale index whose effective quantizer is closest
 * to lambda * 139 (same constant as the linear mapping below). */
205  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
206  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
/* Respect qmin always; qmax only unless VBV forced us to ignore it. */
207  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
208  (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
209  continue;
210  if (diff < bestdiff) {
211  bestdiff = diff;
212  best = i;
213  }
214  }
215  s->qscale = best;
216  } else {
/* Linear mapping: qscale = round(lambda * 139 / 2^(FF_LAMBDA_SHIFT+7)),
 * clipped to [qmin, qmax] (cap of 31 when VBV ignores qmax). */
217  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
218  (FF_LAMBDA_SHIFT + 7);
219  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
220  }
221 
/* NOTE(review): the right-hand operand of this shift (original line 223)
 * is missing from this listing — verify against the upstream file. */
222  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
224 }
225 
227 {
228  int i;
229 
230  if (matrix) {
231  put_bits(pb, 1, 1);
232  for (i = 0; i < 64; i++) {
234  }
235  } else
236  put_bits(pb, 1, 0);
237 }
238 
239 /**
240  * init s->cur_pic.qscale_table from s->lambda_table
241  */
243 {
244  int8_t * const qscale_table = s->cur_pic.qscale_table;
245  int i;
246 
247  for (i = 0; i < s->mb_num; i++) {
248  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
249  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
250  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
251  s->avctx->qmax);
252  }
253 }
254 
256  const MpegEncContext *src)
257 {
258 #define COPY(a) dst->a= src->a
259  COPY(pict_type);
260  COPY(f_code);
261  COPY(b_code);
262  COPY(qscale);
263  COPY(lambda);
264  COPY(lambda2);
265  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
266  COPY(progressive_frame); // FIXME don't set in encode_header
267  COPY(partitioned_frame); // FIXME don't set in encode_header
268 #undef COPY
269 }
270 
271 static void mpv_encode_init_static(void)
272 {
273  for (int i = -16; i < 16; i++)
274  default_fcode_tab[i + MAX_MV] = 1;
275 }
276 
277 /**
278  * Set the given MpegEncContext to defaults for encoding.
279  * the changed fields will not depend upon the prior state of the MpegEncContext.
280  */
282 {
283  static AVOnce init_static_once = AV_ONCE_INIT;
284 
286 
287  ff_thread_once(&init_static_once, mpv_encode_init_static);
288 
289  s->fcode_tab = default_fcode_tab + MAX_MV;
290 
291  s->input_picture_number = 0;
292  s->picture_in_gop_number = 0;
293 }
294 
296 {
297  s->dct_quantize = dct_quantize_c;
298  s->denoise_dct = denoise_dct_c;
299 
300 #if ARCH_MIPS
302 #elif ARCH_X86
304 #endif
305 
306  if (s->avctx->trellis)
307  s->dct_quantize = dct_quantize_trellis_c;
308 }
309 
311 {
312  MECmpContext mecc;
313  me_cmp_func me_cmp[6];
314  int ret;
315 
316  ff_me_cmp_init(&mecc, avctx);
317  ret = ff_me_init(&s->me, avctx, &mecc, 1);
318  if (ret < 0)
319  return ret;
320  ret = ff_set_cmp(&mecc, me_cmp, s->frame_skip_cmp, 1);
321  if (ret < 0)
322  return ret;
323  s->frame_skip_cmp_fn = me_cmp[1];
325  ret = ff_set_cmp(&mecc, me_cmp, avctx->ildct_cmp, 1);
326  if (ret < 0)
327  return ret;
328  if (!me_cmp[0] || !me_cmp[4])
329  return AVERROR(EINVAL);
330  s->ildct_cmp[0] = me_cmp[0];
331  s->ildct_cmp[1] = me_cmp[4];
332  }
333 
334  s->sum_abs_dctelem = mecc.sum_abs_dctelem;
335 
336  s->sse_cmp[0] = mecc.sse[0];
337  s->sse_cmp[1] = mecc.sse[1];
338  s->sad_cmp[0] = mecc.sad[0];
339  s->sad_cmp[1] = mecc.sad[1];
340  if (avctx->mb_cmp == FF_CMP_NSSE) {
341  s->n_sse_cmp[0] = mecc.nsse[0];
342  s->n_sse_cmp[1] = mecc.nsse[1];
343  } else {
344  s->n_sse_cmp[0] = mecc.sse[0];
345  s->n_sse_cmp[1] = mecc.sse[1];
346  }
347 
348  return 0;
349 }
350 
351 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
353 {
354  const int nb_matrices = 1 + (s->out_format == FMT_MJPEG) + !s->intra_only;
355  const uint16_t *intra_matrix, *inter_matrix;
356  int ret;
357 
358  if (!ALLOCZ_ARRAYS(s->q_intra_matrix, 32, nb_matrices) ||
359  !ALLOCZ_ARRAYS(s->q_intra_matrix16, 32, nb_matrices))
360  return AVERROR(ENOMEM);
361 
362  if (s->out_format == FMT_MJPEG) {
363  s->q_chroma_intra_matrix = s->q_intra_matrix + 32;
364  s->q_chroma_intra_matrix16 = s->q_intra_matrix16 + 32;
365  // No need to set q_inter_matrix
366  av_assert1(s->intra_only);
367  // intra_matrix, chroma_intra_matrix will be set later for MJPEG.
368  return 0;
369  } else {
370  s->q_chroma_intra_matrix = s->q_intra_matrix;
371  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
372  }
373  if (!s->intra_only) {
374  s->q_inter_matrix = s->q_intra_matrix + 32;
375  s->q_inter_matrix16 = s->q_intra_matrix16 + 32;
376  }
377 
378  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
379  s->mpeg_quant) {
382  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
383  intra_matrix =
385  } else {
386  /* MPEG-1/2, SpeedHQ */
389  }
390  if (avctx->intra_matrix)
392  if (avctx->inter_matrix)
394 
395  /* init q matrix */
396  for (int i = 0; i < 64; i++) {
397  int j = s->idsp.idct_permutation[i];
398 
399  s->intra_matrix[j] = s->chroma_intra_matrix[j] = intra_matrix[i];
400  s->inter_matrix[j] = inter_matrix[i];
401  }
402 
403  /* precompute matrix */
405  if (ret < 0)
406  return ret;
407 
408  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
409  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
410  31, 1);
411  if (s->q_inter_matrix)
412  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
413  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
414  31, 0);
415 
416  return 0;
417 }
418 
419 /* init video encoder */
421 {
423  AVCPBProperties *cpb_props;
424  int i, ret;
425  int mb_array_size, mv_table_size;
426 
428 
429  switch (avctx->pix_fmt) {
430  case AV_PIX_FMT_YUVJ444P:
431  case AV_PIX_FMT_YUV444P:
432  s->chroma_format = CHROMA_444;
433  break;
434  case AV_PIX_FMT_YUVJ422P:
435  case AV_PIX_FMT_YUV422P:
436  s->chroma_format = CHROMA_422;
437  break;
438  case AV_PIX_FMT_YUVJ420P:
439  case AV_PIX_FMT_YUV420P:
440  default:
441  s->chroma_format = CHROMA_420;
442  break;
443  }
444 
446 
447  s->bit_rate = avctx->bit_rate;
448  s->width = avctx->width;
449  s->height = avctx->height;
450  if (avctx->gop_size > 600 &&
453  "keyframe interval too large!, reducing it from %d to %d\n",
454  avctx->gop_size, 600);
455  avctx->gop_size = 600;
456  }
457  s->gop_size = avctx->gop_size;
458  s->avctx = avctx;
460  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
461  "is %d.\n", MAX_B_FRAMES);
463  } else if (avctx->max_b_frames < 0) {
465  "max b frames must be 0 or positive for mpegvideo based encoders\n");
466  return AVERROR(EINVAL);
467  }
468  s->max_b_frames = avctx->max_b_frames;
469  s->codec_id = avctx->codec->id;
470  if (s->max_b_frames && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
471  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
472  return AVERROR(EINVAL);
473  }
474 
475  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
476  s->rtp_mode = !!s->rtp_payload_size;
477  s->intra_dc_precision = avctx->intra_dc_precision;
478 
479  // workaround some differences between how applications specify dc precision
480  if (s->intra_dc_precision < 0) {
481  s->intra_dc_precision += 8;
482  } else if (s->intra_dc_precision >= 8)
483  s->intra_dc_precision -= 8;
484 
485  if (s->intra_dc_precision < 0) {
487  "intra dc precision must be positive, note some applications use"
488  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
489  return AVERROR(EINVAL);
490  }
491 
492  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
493  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
494  return AVERROR(EINVAL);
495  }
496  s->user_specified_pts = AV_NOPTS_VALUE;
497 
498  if (s->gop_size <= 1) {
499  s->intra_only = 1;
500  s->gop_size = 12;
501  } else {
502  s->intra_only = 0;
503  }
504 
505  /* Fixed QSCALE */
506  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
507 
508  s->adaptive_quant = (avctx->lumi_masking ||
509  avctx->dark_masking ||
512  avctx->p_masking ||
513  s->border_masking ||
514  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
515  !s->fixed_qscale;
516 
517  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
518 
520  switch(avctx->codec_id) {
523  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
524  break;
525  case AV_CODEC_ID_MPEG4:
529  if (avctx->rc_max_rate >= 15000000) {
530  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
531  } else if(avctx->rc_max_rate >= 2000000) {
532  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
533  } else if(avctx->rc_max_rate >= 384000) {
534  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
535  } else
536  avctx->rc_buffer_size = 40;
537  avctx->rc_buffer_size *= 16384;
538  break;
539  }
540  if (avctx->rc_buffer_size) {
541  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
542  }
543  }
544 
545  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
546  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
547  return AVERROR(EINVAL);
548  }
549 
552  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
553  }
554 
556  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
557  return AVERROR(EINVAL);
558  }
559 
561  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
562  return AVERROR(EINVAL);
563  }
564 
565  if (avctx->rc_max_rate &&
569  "impossible bitrate constraints, this will fail\n");
570  }
571 
572  if (avctx->rc_buffer_size &&
575  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
576  return AVERROR(EINVAL);
577  }
578 
579  if (!s->fixed_qscale &&
582  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
584  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
585  if (nbt <= INT_MAX) {
586  avctx->bit_rate_tolerance = nbt;
587  } else
588  avctx->bit_rate_tolerance = INT_MAX;
589  }
590 
591  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
592  s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
593  s->codec_id != AV_CODEC_ID_FLV1) {
594  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
595  return AVERROR(EINVAL);
596  }
597 
598  if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
600  "OBMC is only supported with simple mb decision\n");
601  return AVERROR(EINVAL);
602  }
603 
604  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
605  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
606  return AVERROR(EINVAL);
607  }
608 
609  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
610  s->codec_id == AV_CODEC_ID_H263 ||
611  s->codec_id == AV_CODEC_ID_H263P) &&
612  (avctx->sample_aspect_ratio.num > 255 ||
613  avctx->sample_aspect_ratio.den > 255)) {
615  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
619  }
620 
621  if ((s->codec_id == AV_CODEC_ID_H263 ||
622  s->codec_id == AV_CODEC_ID_H263P) &&
623  (avctx->width > 2048 ||
624  avctx->height > 1152 )) {
625  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
626  return AVERROR(EINVAL);
627  }
628  if (s->codec_id == AV_CODEC_ID_FLV1 &&
629  (avctx->width > 65535 ||
630  avctx->height > 65535 )) {
631  av_log(avctx, AV_LOG_ERROR, "FLV does not support resolutions above 16bit\n");
632  return AVERROR(EINVAL);
633  }
634  if ((s->codec_id == AV_CODEC_ID_H263 ||
635  s->codec_id == AV_CODEC_ID_H263P ||
636  s->codec_id == AV_CODEC_ID_RV20) &&
637  ((avctx->width &3) ||
638  (avctx->height&3) )) {
639  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
640  return AVERROR(EINVAL);
641  }
642 
643  if (s->codec_id == AV_CODEC_ID_RV10 &&
644  (avctx->width &15 ||
645  avctx->height&15 )) {
646  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
647  return AVERROR(EINVAL);
648  }
649 
650  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
651  s->codec_id == AV_CODEC_ID_WMV2) &&
652  avctx->width & 1) {
653  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
654  return AVERROR(EINVAL);
655  }
656 
658  s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
659  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
660  return AVERROR(EINVAL);
661  }
662 
663  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
664  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
665  return AVERROR(EINVAL);
666  }
667 
668  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
670  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=rd\n");
671  return AVERROR(EINVAL);
672  }
673 
674  if (s->scenechange_threshold < 1000000000 &&
677  "closed gop with scene change detection are not supported yet, "
678  "set threshold to 1000000000\n");
679  return AVERROR_PATCHWELCOME;
680  }
681 
683  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
686  "low delay forcing is only available for mpeg2, "
687  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
688  return AVERROR(EINVAL);
689  }
690  if (s->max_b_frames != 0) {
692  "B-frames cannot be used with low delay\n");
693  return AVERROR(EINVAL);
694  }
695  }
696 
697  if (avctx->slices > 1 &&
699  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
700  return AVERROR(EINVAL);
701  }
702 
703  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
705  "notice: b_frame_strategy only affects the first pass\n");
706  s->b_frame_strategy = 0;
707  }
708 
710  if (i > 1) {
711  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
712  avctx->time_base.den /= i;
713  avctx->time_base.num /= i;
714  //return -1;
715  }
716 
717  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
718  // (a + x * 3 / 8) / x
719  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
720  s->inter_quant_bias = 0;
721  } else {
722  s->intra_quant_bias = 0;
723  // (a - x / 4) / x
724  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
725  }
726 
727  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
728  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
729  return AVERROR(EINVAL);
730  }
731 
732  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
733 
734  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
735  avctx->time_base.den > (1 << 16) - 1) {
737  "timebase %d/%d not supported by MPEG 4 standard, "
738  "the maximum admitted value for the timebase denominator "
739  "is %d\n", avctx->time_base.num, avctx->time_base.den,
740  (1 << 16) - 1);
741  return AVERROR(EINVAL);
742  }
743  s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
744 
745  switch (avctx->codec->id) {
746 #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
748  s->rtp_mode = 1;
749  /* fallthrough */
751  s->out_format = FMT_MPEG1;
752  s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
753  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
755  break;
756 #endif
757 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
758  case AV_CODEC_ID_MJPEG:
759  case AV_CODEC_ID_AMV:
760  s->out_format = FMT_MJPEG;
761  s->intra_only = 1; /* force intra only for jpeg */
762  if ((ret = ff_mjpeg_encode_init(s)) < 0)
763  return ret;
764  avctx->delay = 0;
765  s->low_delay = 1;
766  break;
767 #endif
768  case AV_CODEC_ID_SPEEDHQ:
769  s->out_format = FMT_SPEEDHQ;
770  s->intra_only = 1; /* force intra only for SHQ */
771  if (!CONFIG_SPEEDHQ_ENCODER)
773  if ((ret = ff_speedhq_encode_init(s)) < 0)
774  return ret;
775  avctx->delay = 0;
776  s->low_delay = 1;
777  break;
778  case AV_CODEC_ID_H261:
779  if (!CONFIG_H261_ENCODER)
782  if (ret < 0)
783  return ret;
784  s->out_format = FMT_H261;
785  avctx->delay = 0;
786  s->low_delay = 1;
787  s->rtp_mode = 0; /* Sliced encoding not supported */
788  break;
789  case AV_CODEC_ID_H263:
790  if (!CONFIG_H263_ENCODER)
793  s->width, s->height) == 8) {
795  "The specified picture size of %dx%d is not valid for "
796  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
797  "352x288, 704x576, and 1408x1152. "
798  "Try H.263+.\n", s->width, s->height);
799  return AVERROR(EINVAL);
800  }
801  s->out_format = FMT_H263;
802  avctx->delay = 0;
803  s->low_delay = 1;
804  break;
805  case AV_CODEC_ID_H263P:
806  s->out_format = FMT_H263;
807  s->h263_plus = 1;
808  /* Fx */
809  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
810  s->modified_quant = s->h263_aic;
811  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
812  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
813  s->flipflop_rounding = 1;
814 
815  /* /Fx */
816  /* These are just to be sure */
817  avctx->delay = 0;
818  s->low_delay = 1;
819  break;
820  case AV_CODEC_ID_FLV1:
821  s->out_format = FMT_H263;
822  s->h263_flv = 2; /* format = 1; 11-bit codes */
823  s->unrestricted_mv = 1;
824  s->rtp_mode = 0; /* don't allow GOB */
825  avctx->delay = 0;
826  s->low_delay = 1;
827  break;
828  case AV_CODEC_ID_RV10:
829  s->out_format = FMT_H263;
830  avctx->delay = 0;
831  s->low_delay = 1;
832  break;
833  case AV_CODEC_ID_RV20:
834  s->out_format = FMT_H263;
835  avctx->delay = 0;
836  s->low_delay = 1;
837  s->modified_quant = 1;
838  s->h263_aic = 1;
839  s->h263_plus = 1;
840  s->loop_filter = 1;
841  s->unrestricted_mv = 0;
842  break;
843  case AV_CODEC_ID_MPEG4:
844  s->out_format = FMT_H263;
845  s->h263_pred = 1;
846  s->unrestricted_mv = 1;
847  s->flipflop_rounding = 1;
848  s->low_delay = s->max_b_frames ? 0 : 1;
849  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
850  break;
852  s->out_format = FMT_H263;
853  s->h263_pred = 1;
854  s->unrestricted_mv = 1;
855  s->msmpeg4_version = MSMP4_V2;
856  avctx->delay = 0;
857  s->low_delay = 1;
858  break;
860  s->out_format = FMT_H263;
861  s->h263_pred = 1;
862  s->unrestricted_mv = 1;
863  s->msmpeg4_version = MSMP4_V3;
864  s->flipflop_rounding = 1;
865  avctx->delay = 0;
866  s->low_delay = 1;
867  break;
868  case AV_CODEC_ID_WMV1:
869  s->out_format = FMT_H263;
870  s->h263_pred = 1;
871  s->unrestricted_mv = 1;
872  s->msmpeg4_version = MSMP4_WMV1;
873  s->flipflop_rounding = 1;
874  avctx->delay = 0;
875  s->low_delay = 1;
876  break;
877  case AV_CODEC_ID_WMV2:
878  s->out_format = FMT_H263;
879  s->h263_pred = 1;
880  s->unrestricted_mv = 1;
881  s->msmpeg4_version = MSMP4_WMV2;
882  s->flipflop_rounding = 1;
883  avctx->delay = 0;
884  s->low_delay = 1;
885  break;
886  default:
887  return AVERROR(EINVAL);
888  }
889 
890  avctx->has_b_frames = !s->low_delay;
891 
892  s->encoding = 1;
893 
894  s->progressive_frame =
895  s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
897  s->alternate_scan);
898 
899  if (s->lmin > s->lmax) {
900  av_log(avctx, AV_LOG_WARNING, "Clipping lmin value to %d\n", s->lmax);
901  s->lmin = s->lmax;
902  }
903 
904  /* init */
906  if ((ret = ff_mpv_common_init(s)) < 0)
907  return ret;
908 
909  ff_fdctdsp_init(&s->fdsp, avctx);
910  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
911  ff_pixblockdsp_init(&s->pdsp, avctx);
912  ret = me_cmp_init(s, avctx);
913  if (ret < 0)
914  return ret;
915 
916  if (!(avctx->stats_out = av_mallocz(256)) ||
917  !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_B_FRAMES + 1) ||
918  !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_B_FRAMES + 1) ||
919  !(s->new_pic = av_frame_alloc()) ||
920  !(s->picture_pool = ff_mpv_alloc_pic_pool(0)))
921  return AVERROR(ENOMEM);
922 
923  ret = init_matrices(s, avctx);
924  if (ret < 0)
925  return ret;
926 
927  /* Allocate MV tables; the MV and MB tables will be copied
928  * to slice contexts by ff_update_duplicate_context(). */
929  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
930  if (!FF_ALLOCZ_TYPED_ARRAY(s->p_mv_table_base, mv_table_size) ||
931  !FF_ALLOCZ_TYPED_ARRAY(s->b_forw_mv_table_base, mv_table_size) ||
932  !FF_ALLOCZ_TYPED_ARRAY(s->b_back_mv_table_base, mv_table_size) ||
933  !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_forw_mv_table_base, mv_table_size) ||
934  !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_back_mv_table_base, mv_table_size) ||
935  !FF_ALLOCZ_TYPED_ARRAY(s->b_direct_mv_table_base, mv_table_size))
936  return AVERROR(ENOMEM);
937  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
938  s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
939  s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
940  s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
941  s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
942  s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
943 
944  /* Allocate MB type table */
945  mb_array_size = s->mb_stride * s->mb_height;
946  if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_type, mb_array_size) ||
947  !FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size) ||
948  !FF_ALLOCZ_TYPED_ARRAY(s->mc_mb_var, mb_array_size) ||
949  !FF_ALLOCZ_TYPED_ARRAY(s->mb_var, mb_array_size) ||
950  !(s->mb_mean = av_mallocz(mb_array_size)))
951  return AVERROR(ENOMEM);
952 
953  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
954  (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
955  int16_t (*tmp1)[2];
956  uint8_t *tmp2;
957  if (!(tmp1 = ALLOCZ_ARRAYS(s->b_field_mv_table_base, 8, mv_table_size)) ||
958  !(tmp2 = ALLOCZ_ARRAYS(s->b_field_select_table[0][0], 2 * 4, mv_table_size)) ||
959  !ALLOCZ_ARRAYS(s->p_field_select_table[0], 2 * 2, mv_table_size))
960  return AVERROR(ENOMEM);
961 
962  s->p_field_select_table[1] = s->p_field_select_table[0] + 2 * mv_table_size;
963  tmp1 += s->mb_stride + 1;
964 
965  for (int i = 0; i < 2; i++) {
966  for (int j = 0; j < 2; j++) {
967  for (int k = 0; k < 2; k++) {
968  s->b_field_mv_table[i][j][k] = tmp1;
969  tmp1 += mv_table_size;
970  }
971  s->b_field_select_table[i][j] = tmp2;
972  tmp2 += 2 * mv_table_size;
973  }
974  }
975  }
976 
977  if (s->noise_reduction) {
978  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
979  return AVERROR(ENOMEM);
980  }
981 
983 
984  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
985  s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
986  s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
987  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
988  s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
989  s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
990  } else {
991  s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
992  s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
993  }
994 
995  if (s->slice_context_count > 1) {
996  s->rtp_mode = 1;
997 
999  s->h263_slice_structured = 1;
1000  }
1001 
1002  if (CONFIG_H263_ENCODER && s->out_format == FMT_H263) {
1004 #if CONFIG_MSMPEG4ENC
1005  if (s->msmpeg4_version != MSMP4_UNUSED)
1007 #endif
1008  }
1009 
1010  if ((ret = ff_rate_control_init(s)) < 0)
1011  return ret;
1012 
1013  if (s->b_frame_strategy == 2) {
1014  for (i = 0; i < s->max_b_frames + 2; i++) {
1015  s->tmp_frames[i] = av_frame_alloc();
1016  if (!s->tmp_frames[i])
1017  return AVERROR(ENOMEM);
1018 
1019  s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1020  s->tmp_frames[i]->width = s->width >> s->brd_scale;
1021  s->tmp_frames[i]->height = s->height >> s->brd_scale;
1022 
1023  ret = av_frame_get_buffer(s->tmp_frames[i], 0);
1024  if (ret < 0)
1025  return ret;
1026  }
1027  }
1028 
1029  cpb_props = ff_encode_add_cpb_side_data(avctx);
1030  if (!cpb_props)
1031  return AVERROR(ENOMEM);
1032  cpb_props->max_bitrate = avctx->rc_max_rate;
1033  cpb_props->min_bitrate = avctx->rc_min_rate;
1034  cpb_props->avg_bitrate = avctx->bit_rate;
1035  cpb_props->buffer_size = avctx->rc_buffer_size;
1036 
1037  return 0;
1038 }
1039 
{
    /* Encoder teardown: free rate control, queued pictures, MV tables and
     * per-MB statistics.
     * NOTE(review): the signature line of this function (ff_mpv_encode_end?)
     * was dropped from this listing — confirm against upstream. */
    int i;   // NOTE(review): shadowed by the loop-local 'i' in the unref loop below

    ff_rate_control_uninit(&s->rc_context);

    /* NOTE(review): a teardown call appears to be missing from this listing
     * between these lines (dropped line) — confirm against upstream. */
    av_refstruct_pool_uninit(&s->picture_pool);

    /* Drop any queued / reordered input frames still referenced. */
    if (s->input_picture && s->reordered_input_picture) {
        for (int i = 0; i < MAX_B_FRAMES + 1; i++) {
            av_refstruct_unref(&s->input_picture[i]);
            av_refstruct_unref(&s->reordered_input_picture[i]);
        }
    }
    for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
        av_frame_free(&s->tmp_frames[i]);

    av_frame_free(&s->new_pic);
    /* NOTE(review): one more free/uninit line appears to be missing here
     * (dropped line) — confirm against upstream. */

    /* Motion-vector tables (base pointers include the edge padding). */
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    av_freep(&s->b_field_mv_table_base);
    av_freep(&s->b_field_select_table[0][0]);
    av_freep(&s->p_field_select_table[0]);

    av_freep(&s->mb_type);
    av_freep(&s->lambda_table);

    /* Quantizer matrices and per-MB statistics. */
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);
    av_freep(&s->mb_var);
    av_freep(&s->mc_mb_var);
    av_freep(&s->mb_mean);

    return 0;
}
1087 
/* put block[] to dest[]: dequantize an intra block in place, then OVERWRITE
 * the destination with its inverse DCT (contrast add_dequant_dct, which
 * accumulates). 'i' selects the block's quantizer context; 'line_size' is the
 * destination stride in bytes. */
static inline void put_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->idsp.idct_put(dest, line_size, block);
}
1095 
1096 static inline void add_dequant_dct(MpegEncContext *s,
1097  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1098 {
1099  if (s->block_last_index[i] >= 0) {
1100  s->dct_unquantize_inter(s, block, i, qscale);
1101 
1102  s->idsp.idct_add(dest, line_size, block);
1103  }
1104 }
1105 
/**
 * Performs dequantization and IDCT (if necessary)
 *
 * Reconstructs one macroblock's pixels into s->dest[] from the quantized
 * coefficients in block[].  Reconstruction is skipped entirely when nothing
 * downstream needs the decoded pixels (no PSNR, no frame-skip decision, and
 * the picture is a non-reference B/intra-only picture without RD decision).
 */
static void mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
{
    if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
        /* print DCT coefficients */
        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
        for (int i = 0; i < 6; i++) {
            for (int j = 0; j < 64; j++) {
                av_log(s->avctx, AV_LOG_DEBUG, "%5d",
                       block[i][s->idsp.idct_permutation[j]]);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    if ((s->avctx->flags & AV_CODEC_FLAG_PSNR) || s->frame_skip_threshold || s->frame_skip_factor ||
        !((s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
          s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
        uint8_t *dest_y = s->dest[0], *dest_cb = s->dest[1], *dest_cr = s->dest[2];
        int dct_linesize, dct_offset;
        const int linesize   = s->cur_pic.linesize[0];
        const int uvlinesize = s->cur_pic.linesize[1];
        const int block_size = 8;

        /* With interlaced DCT the two luma fields interleave: double the
         * stride and place the "lower" blocks one line (not 8 lines) down. */
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;

        if (!s->mb_intra) {
            /* No MC, as that was already done otherwise */
            add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
            add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
            add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
            add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

            if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                if (s->chroma_y_shift) {
                    /* vertically subsampled chroma: one 8x8 block per plane */
                    add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                    add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                } else {
                    /* no vertical chroma subsampling: two stacked blocks per
                     * plane, so halve the luma-derived stride and offset */
                    dct_linesize >>= 1;
                    dct_offset   >>= 1;
                    add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                    add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                    add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                    add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                }
            }
        } else {
            /* dct only in intra block */
            put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
            put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
            put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
            put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

            if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                if (s->chroma_y_shift) {
                    put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                    put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                } else {
                    dct_offset   >>= 1;
                    dct_linesize >>= 1;
                    put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                    put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                    put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                    put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                }
            }
        }
    }
}
1178 
/**
 * Sum of absolute errors of a 16x16 pixel block against a constant
 * reference value 'ref'.
 */
static int get_sae(const uint8_t *src, int ref, int stride)
{
    int sum = 0;

    for (int y = 0; y < 16; y++) {
        const uint8_t *row = src + y * stride;
        for (int x = 0; x < 16; x++) {
            const int d = row[x] - ref;
            sum += d < 0 ? -d : d;
        }
    }

    return sum;
}
1192 
1193 static int get_intra_count(MpegEncContext *s, const uint8_t *src,
1194  const uint8_t *ref, int stride)
1195 {
1196  int x, y, w, h;
1197  int acc = 0;
1198 
1199  w = s->width & ~15;
1200  h = s->height & ~15;
1201 
1202  for (y = 0; y < h; y += 16) {
1203  for (x = 0; x < w; x += 16) {
1204  int offset = x + y * stride;
1205  int sad = s->sad_cmp[0](NULL, src + offset, ref + offset,
1206  stride, 16);
1207  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1208  int sae = get_sae(src + offset, mean, stride);
1209 
1210  acc += sae + 500 < sad;
1211  }
1212  }
1213  return acc;
1214 }
1215 
/**
 * Allocates new buffers for an AVFrame and copies the properties
 * from another AVFrame.
 *
 * The buffer is allocated with an EDGE_WIDTH border on all four sides; the
 * returned data[] pointers are advanced past the border so they address the
 * visible picture.
 */
static int prepare_picture(MpegEncContext *s, AVFrame *f, const AVFrame *props_frame)
{
    AVCodecContext *avctx = s->avctx;
    int ret;

    f->width  = avctx->width  + 2 * EDGE_WIDTH;
    f->height = avctx->height + 2 * EDGE_WIDTH;

    /* NOTE(review): the buffer-allocation call that sets 'ret' is missing
     * from this listing (dropped line) — confirm against upstream. */
    if (ret < 0)
        return ret;

    ret = ff_mpv_pic_check_linesize(avctx, f, &s->linesize, &s->uvlinesize);
    if (ret < 0)
        return ret;

    /* Skip the edge border so data[] points at the visible picture area;
     * chroma planes use the subsampled edge width/height. */
    for (int i = 0; f->data[i]; i++) {
        int offset = (EDGE_WIDTH >> (i ? s->chroma_y_shift : 0)) *
                     f->linesize[i] +
                     (EDGE_WIDTH >> (i ? s->chroma_x_shift : 0));
        f->data[i] += offset;
    }
    f->width  = avctx->width;
    f->height = avctx->height;

    ret = av_frame_copy_props(f, props_frame);
    if (ret < 0)
        return ret;

    return 0;
}
1251 
/**
 * Take one user-supplied frame (or NULL to flush), validate/generate its pts,
 * wrap it in an MPVPicture (referencing it directly when strides and
 * alignment permit, copying otherwise) and append it to s->input_picture[]
 * at the encoder-delay position.
 *
 * @param pic_arg  next input frame, or NULL at end of stream (flush)
 * @return 0 on success, a negative AVERROR on failure
 */
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
{
    MPVPicture *pic = NULL;
    int64_t pts;
    int display_picture_number = 0, ret;
    int encoding_delay = s->max_b_frames ? s->max_b_frames
                                         : (s->low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;

    av_assert1(!s->input_picture[0]);

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = s->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t last = s->user_specified_pts;

                /* pts must be strictly monotonic */
                if (pts <= last) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                           pts, last);
                    return AVERROR(EINVAL);
                }

                if (!s->low_delay && display_picture_number == 1)
                    s->dts_delta = pts - last;
            }
            s->user_specified_pts = pts;
        } else {
            /* no pts supplied: extrapolate from the previous one, or fall
             * back to the display picture number */
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                s->user_specified_pts =
                pts = s->user_specified_pts + 1;
                av_log(s->avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        /* "direct" mode references the user's buffer without copying; it
         * requires matching strides, mb-aligned dimensions and aligned data */
        if (pic_arg->linesize[0] != s->linesize ||
            pic_arg->linesize[1] != s->uvlinesize ||
            pic_arg->linesize[2] != s->uvlinesize)
            direct = 0;
        if ((s->width & 15) || (s->height & 15))
            direct = 0;
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
            direct = 0;
        if (s->linesize & (STRIDE_ALIGN-1))
            direct = 0;

        ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->linesize, s->uvlinesize);

        pic = av_refstruct_pool_get(s->picture_pool);
        if (!pic)
            return AVERROR(ENOMEM);

        if (direct) {
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                goto fail;
            pic->shared = 1;
        } else {
            /* allocate an edged buffer and copy the planes into it */
            ret = prepare_picture(s, pic->f, pic_arg);
            if (ret < 0)
                goto fail;

            for (int i = 0; i < 3; i++) {
                ptrdiff_t src_stride = pic_arg->linesize[i];
                ptrdiff_t dst_stride = i ? s->uvlinesize : s->linesize;
                int h_shift = i ? s->chroma_x_shift : 0;
                int v_shift = i ? s->chroma_y_shift : 0;
                int w = AV_CEIL_RSHIFT(s->width , h_shift);
                int h = AV_CEIL_RSHIFT(s->height, v_shift);
                const uint8_t *src = pic_arg->data[i];
                uint8_t *dst = pic->f->data[i];
                int vpad = 16;

                /* interlaced MPEG-2 needs 32-line alignment of the padding */
                if (   s->codec_id == AV_CODEC_ID_MPEG2VIDEO
                    && !s->progressive_sequence
                    && FFALIGN(s->height, 32) - s->height > 16)
                    vpad = 32;

                if (!s->avctx->rc_buffer_size)
                    dst += INPLACE_OFFSET;

                if (src_stride == dst_stride)
                    memcpy(dst, src, src_stride * h - src_stride + w);
                else {
                    int h2 = h;
                    uint8_t *dst2 = dst;
                    while (h2--) {
                        memcpy(dst2, src, w);
                        dst2 += dst_stride;
                        src += src_stride;
                    }
                }
                /* replicate the bottom/right border for non-aligned sizes */
                if ((s->width & 15) || (s->height & (vpad-1))) {
                    s->mpvencdsp.draw_edges(dst, dst_stride,
                                            w, h,
                                            16 >> h_shift,
                                            vpad >> v_shift,
                                            EDGE_BOTTOM);
                }
            }
            emms_c();
        }

        pic->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifying pic_arg
    } else if (!s->reordered_input_picture[1]) {
        /* Flushing: When the above check is true, the encoder is about to run
         * out of frames to encode. Check if there are input_pictures left;
         * if so, ensure s->input_picture[0] contains the first picture.
         * A flush_offset != 1 will only happen if we did not receive enough
         * input frames. */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (s->input_picture[flush_offset])
                break;

        encoding_delay -= flush_offset - 1;
    }

    /* shift buffer entries */
    for (int i = flush_offset; i <= MAX_B_FRAMES; i++)
        s->input_picture[i - flush_offset] = s->input_picture[i];
    for (int i = MAX_B_FRAMES + 1 - flush_offset; i <= MAX_B_FRAMES; i++)
        s->input_picture[i] = NULL;

    s->input_picture[encoding_delay] = pic;

    return 0;
fail:
    av_refstruct_unref(&pic);
    return ret;
}
1391 
1392 static int skip_check(MpegEncContext *s, const MPVPicture *p, const MPVPicture *ref)
1393 {
1394  int x, y, plane;
1395  int score = 0;
1396  int64_t score64 = 0;
1397 
1398  for (plane = 0; plane < 3; plane++) {
1399  const int stride = p->f->linesize[plane];
1400  const int bw = plane ? 1 : 2;
1401  for (y = 0; y < s->mb_height * bw; y++) {
1402  for (x = 0; x < s->mb_width * bw; x++) {
1403  int off = p->shared ? 0 : 16;
1404  const uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1405  const uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1406  int v = s->frame_skip_cmp_fn(s, dptr, rptr, stride, 8);
1407 
1408  switch (FFABS(s->frame_skip_exp)) {
1409  case 0: score = FFMAX(score, v); break;
1410  case 1: score += FFABS(v); break;
1411  case 2: score64 += v * (int64_t)v; break;
1412  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1413  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1414  }
1415  }
1416  }
1417  }
1418  emms_c();
1419 
1420  if (score)
1421  score64 = score;
1422  if (s->frame_skip_exp < 0)
1423  score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1424  -1.0/s->frame_skip_exp);
1425 
1426  if (score64 < s->frame_skip_threshold)
1427  return 1;
1428  if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1429  return 1;
1430  return 0;
1431 }
1432 
{
    /* Helper for estimate_best_b_count(): feed one frame to the scaled-down
     * test encoder and drain all resulting packets, returning the total
     * compressed size in bytes (or a negative AVERROR).
     * NOTE(review): the signature line was dropped from this listing. */
    int ret;
    int size = 0;

    /* NOTE(review): the send-frame call that initializes 'ret' is missing
     * from this listing (dropped line) — confirm against upstream. */
    if (ret < 0)
        return ret;

    do {
        /* NOTE(review): the receive-packet call updating 'ret' and 'pkt' is
         * missing from this listing (dropped line) — confirm upstream. */
        if (ret >= 0) {
            size += pkt->size;
            /* NOTE(review): the packet-unref line is missing from this
             * listing (dropped line) — confirm upstream. */
        } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            return ret;
    } while (ret >= 0);

    return size;
}
1453 
1455 {
1456  AVPacket *pkt;
1457  const int scale = s->brd_scale;
1458  int width = s->width >> scale;
1459  int height = s->height >> scale;
1460  int i, j, out_size, p_lambda, b_lambda, lambda2;
1461  int64_t best_rd = INT64_MAX;
1462  int best_b_count = -1;
1463  int ret = 0;
1464 
1465  av_assert0(scale >= 0 && scale <= 3);
1466 
1467  pkt = av_packet_alloc();
1468  if (!pkt)
1469  return AVERROR(ENOMEM);
1470 
1471  //emms_c();
1472  p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1473  //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1474  b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1475  if (!b_lambda) // FIXME we should do this somewhere else
1476  b_lambda = p_lambda;
1477  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1479 
1480  for (i = 0; i < s->max_b_frames + 2; i++) {
1481  const MPVPicture *pre_input_ptr = i ? s->input_picture[i - 1] :
1482  s->next_pic.ptr;
1483 
1484  if (pre_input_ptr) {
1485  const uint8_t *data[4];
1486  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1487 
1488  if (!pre_input_ptr->shared && i) {
1489  data[0] += INPLACE_OFFSET;
1490  data[1] += INPLACE_OFFSET;
1491  data[2] += INPLACE_OFFSET;
1492  }
1493 
1494  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1495  s->tmp_frames[i]->linesize[0],
1496  data[0],
1497  pre_input_ptr->f->linesize[0],
1498  width, height);
1499  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1500  s->tmp_frames[i]->linesize[1],
1501  data[1],
1502  pre_input_ptr->f->linesize[1],
1503  width >> 1, height >> 1);
1504  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1505  s->tmp_frames[i]->linesize[2],
1506  data[2],
1507  pre_input_ptr->f->linesize[2],
1508  width >> 1, height >> 1);
1509  }
1510  }
1511 
1512  for (j = 0; j < s->max_b_frames + 1; j++) {
1513  AVCodecContext *c;
1514  int64_t rd = 0;
1515 
1516  if (!s->input_picture[j])
1517  break;
1518 
1520  if (!c) {
1521  ret = AVERROR(ENOMEM);
1522  goto fail;
1523  }
1524 
1525  c->width = width;
1526  c->height = height;
1528  c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1529  c->mb_decision = s->avctx->mb_decision;
1530  c->me_cmp = s->avctx->me_cmp;
1531  c->mb_cmp = s->avctx->mb_cmp;
1532  c->me_sub_cmp = s->avctx->me_sub_cmp;
1533  c->pix_fmt = AV_PIX_FMT_YUV420P;
1534  c->time_base = s->avctx->time_base;
1535  c->max_b_frames = s->max_b_frames;
1536 
1537  ret = avcodec_open2(c, s->avctx->codec, NULL);
1538  if (ret < 0)
1539  goto fail;
1540 
1541 
1542  s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1543  s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1544 
1545  out_size = encode_frame(c, s->tmp_frames[0], pkt);
1546  if (out_size < 0) {
1547  ret = out_size;
1548  goto fail;
1549  }
1550 
1551  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1552 
1553  for (i = 0; i < s->max_b_frames + 1; i++) {
1554  int is_p = i % (j + 1) == j || i == s->max_b_frames;
1555 
1556  s->tmp_frames[i + 1]->pict_type = is_p ?
1558  s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1559 
1560  out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
1561  if (out_size < 0) {
1562  ret = out_size;
1563  goto fail;
1564  }
1565 
1566  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1567  }
1568 
1569  /* get the delayed frames */
1571  if (out_size < 0) {
1572  ret = out_size;
1573  goto fail;
1574  }
1575  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1576 
1577  rd += c->error[0] + c->error[1] + c->error[2];
1578 
1579  if (rd < best_rd) {
1580  best_rd = rd;
1581  best_b_count = j;
1582  }
1583 
1584 fail:
1587  if (ret < 0) {
1588  best_b_count = ret;
1589  break;
1590  }
1591  }
1592 
1593  av_packet_free(&pkt);
1594 
1595  return best_b_count;
1596 }
1597 
/**
 * Determines whether an input picture is discarded or not
 * and if not determines the length of the next chain of B frames
 * and moves these pictures (including the P frame) into
 * reordered_input_picture.
 * input_picture[0] is always NULL when exiting this function, even on error;
 * reordered_input_picture[0] is always NULL when exiting this function on error.
 */
/* NOTE(review): the signature line of this function was dropped from this
 * listing — confirm against upstream. */
{
    /* Either nothing to do or can't do anything */
    if (s->reordered_input_picture[0] || !s->input_picture[0])
        return 0;

    /* set next picture type & ordering */
    if (s->frame_skip_threshold || s->frame_skip_factor) {
        /* drop the picture entirely if it is close enough to the last
         * reference and we are still inside the GOP */
        if (s->picture_in_gop_number < s->gop_size &&
            s->next_pic.ptr &&
            skip_check(s, s->input_picture[0], s->next_pic.ptr)) {
            // FIXME check that the gop check above is +-1 correct
            av_refstruct_unref(&s->input_picture[0]);

            ff_vbv_update(s, 0);

            return 0;
        }
    }

    if (/*s->picture_in_gop_number >= s->gop_size ||*/
        !s->next_pic.ptr || s->intra_only) {
        /* no reference available (or intra-only): emit an I picture */
        s->reordered_input_picture[0] = s->input_picture[0];
        s->input_picture[0] = NULL;
        s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
        s->reordered_input_picture[0]->coded_picture_number =
            s->coded_picture_number++;
    } else {
        int b_frames = 0;

        /* two-pass: take the picture types recorded during the first pass */
        if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
            for (int i = 0; i < s->max_b_frames + 1; i++) {
                int pict_num = s->input_picture[0]->display_picture_number + i;

                if (pict_num >= s->rc_context.num_entries)
                    break;
                if (!s->input_picture[i]) {
                    s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
                    break;
                }

                s->input_picture[i]->f->pict_type =
                    s->rc_context.entry[pict_num].new_pict_type;
            }
        }

        if (s->b_frame_strategy == 0) {
            /* strategy 0: always use the maximum available run of B frames */
            b_frames = s->max_b_frames;
            while (b_frames && !s->input_picture[b_frames])
                b_frames--;
        } else if (s->b_frame_strategy == 1) {
            /* strategy 1: stop the B-frame run at the first picture with too
             * many "intra-looking" macroblocks relative to its predecessor */
            int i;
            for (i = 1; i < s->max_b_frames + 1; i++) {
                if (s->input_picture[i] &&
                    s->input_picture[i]->b_frame_score == 0) {
                    s->input_picture[i]->b_frame_score =
                        /* NOTE(review): the intra-count call line is missing
                         * from this listing (dropped line) — confirm. */
                               s->input_picture[i    ]->f->data[0],
                               s->input_picture[i - 1]->f->data[0],
                               s->linesize) + 1;
                }
            }
            for (i = 0; i < s->max_b_frames + 1; i++) {
                if (!s->input_picture[i] ||
                    s->input_picture[i]->b_frame_score - 1 >
                        s->mb_num / s->b_sensitivity)
                    break;
            }

            b_frames = FFMAX(0, i - 1);

            /* reset scores */
            for (i = 0; i < b_frames + 1; i++) {
                s->input_picture[i]->b_frame_score = 0;
            }
        } else if (s->b_frame_strategy == 2) {
            /* strategy 2: brute-force trial encodes at reduced resolution */
            b_frames = estimate_best_b_count(s);
            if (b_frames < 0) {
                av_refstruct_unref(&s->input_picture[0]);
                return b_frames;
            }
        }

        emms_c();

        /* honor picture types forced by the user / pass-1 log */
        for (int i = b_frames - 1; i >= 0; i--) {
            int type = s->input_picture[i]->f->pict_type;
            if (type && type != AV_PICTURE_TYPE_B)
                b_frames = i;
        }
        if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
            b_frames == s->max_b_frames) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "warning, too many B-frames in a row\n");
        }

        /* end of GOP handling: shorten the run or force an I picture */
        if (s->picture_in_gop_number + b_frames >= s->gop_size) {
            if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
                s->gop_size > s->picture_in_gop_number) {
                b_frames = s->gop_size - s->picture_in_gop_number - 1;
            } else {
                if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
                    b_frames = 0;
                s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
            }
        }

        if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
            s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
            b_frames--;

        /* move the anchor (P/I) first, then its preceding B frames */
        s->reordered_input_picture[0] = s->input_picture[b_frames];
        s->input_picture[b_frames] = NULL;
        if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
            s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
        s->reordered_input_picture[0]->coded_picture_number =
            s->coded_picture_number++;
        for (int i = 0; i < b_frames; i++) {
            s->reordered_input_picture[i + 1] = s->input_picture[i];
            s->input_picture[i] = NULL;
            s->reordered_input_picture[i + 1]->f->pict_type =
                /* NOTE(review): the picture-type value line (presumably
                 * AV_PICTURE_TYPE_B) is missing from this listing — confirm. */
            s->reordered_input_picture[i + 1]->coded_picture_number =
                s->coded_picture_number++;
        }
    }

    return 0;
}
1735 
{
    /* Pop the next picture to encode from the reordered queue into
     * s->cur_pic / s->new_pic and allocate its encoding accessories.
     * NOTE(review): the signature line was dropped from this listing
     * (ff_mpv_select_input_picture?) — confirm against upstream. */
    int ret;

    av_assert1(!s->reordered_input_picture[0]);

    /* shift the reordered queue down by one; slot 0 is refilled below */
    for (int i = 1; i <= MAX_B_FRAMES; i++)
        s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
    s->reordered_input_picture[MAX_B_FRAMES] = NULL;

    /* NOTE(review): the call assigning 'ret' (B-frame chain selection) is
     * missing from this listing (dropped line) — confirm against upstream. */
    av_assert1(!s->input_picture[0]);
    if (ret < 0)
        return ret;

    av_frame_unref(s->new_pic);

    if (s->reordered_input_picture[0]) {
        /* B pictures are never used as references */
        s->reordered_input_picture[0]->reference =
            s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_B;

        if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
            // input is a shared pix, so we can't modify it -> allocate a new
            // one & ensure that the shared one is reuseable
            av_frame_move_ref(s->new_pic, s->reordered_input_picture[0]->f);

            ret = prepare_picture(s, s->reordered_input_picture[0]->f, s->new_pic);
            if (ret < 0)
                goto fail;
        } else {
            // input is not a shared pix -> reuse buffer for current_pix
            ret = av_frame_ref(s->new_pic, s->reordered_input_picture[0]->f);
            if (ret < 0)
                goto fail;
            for (int i = 0; i < MPV_MAX_PLANES; i++) {
                if (s->new_pic->data[i])
                    s->new_pic->data[i] += INPLACE_OFFSET;
            }
        }
        s->cur_pic.ptr = s->reordered_input_picture[0];
        s->reordered_input_picture[0] = NULL;
        av_assert1(s->mb_width  == s->buffer_pools.alloc_mb_width);
        av_assert1(s->mb_height == s->buffer_pools.alloc_mb_height);
        av_assert1(s->mb_stride == s->buffer_pools.alloc_mb_stride);
        ret = ff_mpv_alloc_pic_accessories(s->avctx, &s->cur_pic,
                                           &s->sc, &s->buffer_pools, s->mb_height);
        if (ret < 0) {
            ff_mpv_unref_picture(&s->cur_pic);
            return ret;
        }
        s->picture_number = s->cur_pic.ptr->display_picture_number;

    }
    return 0;
fail:
    av_refstruct_unref(&s->reordered_input_picture[0]);
    return ret;
}
1794 
{
    /* Post-encode bookkeeping: pad the reconstructed reference picture with
     * replicated border pixels (for unrestricted MVs) and remember per-type
     * lambda for rate control.
     * NOTE(review): the signature line (frame_end?) was dropped from this
     * listing — confirm against upstream. */
    if (s->unrestricted_mv &&
        s->cur_pic.reference &&
        !s->intra_only) {
        int hshift = s->chroma_x_shift;
        int vshift = s->chroma_y_shift;
        s->mpvencdsp.draw_edges(s->cur_pic.data[0],
                                s->cur_pic.linesize[0],
                                s->h_edge_pos, s->v_edge_pos,
                                /* NOTE(review): the luma edge-width argument
                                 * line is missing from this listing — confirm. */
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->cur_pic.data[1],
                                s->cur_pic.linesize[1],
                                s->h_edge_pos >> hshift,
                                s->v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->cur_pic.data[2],
                                s->cur_pic.linesize[2],
                                s->h_edge_pos >> hshift,
                                s->v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    /* remember state for rate control of the following pictures */
    s->last_pict_type                 = s->pict_type;
    s->last_lambda_for [s->pict_type] = s->cur_pic.ptr->f->quality;
    if (s->pict_type!= AV_PICTURE_TYPE_B)
        s->last_non_b_pict_type = s->pict_type;
}
1830 
{
    /* Recompute the per-coefficient noise-reduction offsets from the running
     * DCT error statistics, separately for intra and inter blocks.
     * NOTE(review): the signature line (update_noise_reduction?) was dropped
     * from this listing — confirm against upstream. */
    int intra, i;

    for (intra = 0; intra < 2; intra++) {
        /* halve the accumulated statistics periodically so they track the
         * recent past instead of growing without bound */
        if (s->dct_count[intra] > (1 << 16)) {
            for (i = 0; i < 64; i++) {
                s->dct_error_sum[intra][i] >>= 1;
            }
            s->dct_count[intra] >>= 1;
        }

        /* offset applied by the noise-reducing quantizer per coefficient */
        for (i = 0; i < 64; i++) {
            s->dct_offset[intra][i] = (s->noise_reduction *
                                       s->dct_count[intra] +
                                       s->dct_error_sum[intra][i] / 2) /
                                      (s->dct_error_sum[intra][i] + 1);
        }
    }
}
1851 
{
    /* Per-frame setup: publish the picture type and rotate the reference
     * pictures for non-B frames.
     * NOTE(review): the signature line (frame_start?) was dropped from this
     * listing — confirm against upstream. */
    s->cur_pic.ptr->f->pict_type = s->pict_type;

    /* non-B pictures become the new forward reference */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        ff_mpv_replace_picture(&s->last_pic, &s->next_pic);
        ff_mpv_replace_picture(&s->next_pic, &s->cur_pic);
    }

    if (s->dct_error_sum) {
        av_assert2(s->noise_reduction && s->encoding);
        /* NOTE(review): the noise-reduction update call line appears to be
         * missing from this listing (dropped line) — confirm upstream. */
    }
}
1866 
1868  const AVFrame *pic_arg, int *got_packet)
1869 {
1871  int stuffing_count, ret;
1872  int context_count = s->slice_context_count;
1873 
1874  ff_mpv_unref_picture(&s->cur_pic);
1875 
1876  s->vbv_ignore_qmax = 0;
1877 
1878  s->picture_in_gop_number++;
1879 
1880  if (load_input_picture(s, pic_arg) < 0)
1881  return -1;
1882 
1883  if (select_input_picture(s) < 0) {
1884  return -1;
1885  }
1886 
1887  /* output? */
1888  if (s->new_pic->data[0]) {
1889  int growing_buffer = context_count == 1 && !s->data_partitioning;
1890  size_t pkt_size = 10000 + s->mb_width * s->mb_height *
1891  (growing_buffer ? 64 : (MAX_MB_BYTES + 100));
1892  if (CONFIG_MJPEG_ENCODER && avctx->codec_id == AV_CODEC_ID_MJPEG) {
1893  ret = ff_mjpeg_add_icc_profile_size(avctx, s->new_pic, &pkt_size);
1894  if (ret < 0)
1895  return ret;
1896  }
1897  if ((ret = ff_alloc_packet(avctx, pkt, pkt_size)) < 0)
1898  return ret;
1900  if (s->mb_info) {
1901  s->mb_info_ptr = av_packet_new_side_data(pkt,
1903  s->mb_width*s->mb_height*12);
1904  if (!s->mb_info_ptr)
1905  return AVERROR(ENOMEM);
1906  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1907  }
1908 
1909  s->pict_type = s->new_pic->pict_type;
1910  //emms_c();
1911  frame_start(s);
1912 vbv_retry:
1913  ret = encode_picture(s, pkt);
1914  if (growing_buffer) {
1915  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1916  pkt->data = s->pb.buf;
1918  }
1919  if (ret < 0)
1920  return -1;
1921 
1922  frame_end(s);
1923 
1924  if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->out_format == FMT_MJPEG)
1925  ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
1926 
1927  if (avctx->rc_buffer_size) {
1928  RateControlContext *rcc = &s->rc_context;
1929  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1930  int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1931  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1932 
1933  if (put_bits_count(&s->pb) > max_size &&
1934  s->lambda < s->lmax) {
1935  s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1936  (s->qscale + 1) / s->qscale);
1937  if (s->adaptive_quant) {
1938  int i;
1939  for (i = 0; i < s->mb_height * s->mb_stride; i++)
1940  s->lambda_table[i] =
1941  FFMAX(s->lambda_table[i] + min_step,
1942  s->lambda_table[i] * (s->qscale + 1) /
1943  s->qscale);
1944  }
1945  s->mb_skipped = 0; // done in frame_start()
1946  // done in encode_picture() so we must undo it
1947  if (s->pict_type == AV_PICTURE_TYPE_P) {
1948  s->no_rounding ^= s->flipflop_rounding;
1949  }
1950  if (s->pict_type != AV_PICTURE_TYPE_B) {
1951  s->time_base = s->last_time_base;
1952  s->last_non_b_time = s->time - s->pp_time;
1953  }
1954  s->vbv_ignore_qmax = 1;
1955  av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1956  goto vbv_retry;
1957  }
1958 
1960  }
1961 
1964 
1965  for (int i = 0; i < MPV_MAX_PLANES; i++)
1966  avctx->error[i] += s->encoding_error[i];
1967  ff_side_data_set_encoder_stats(pkt, s->cur_pic.ptr->f->quality,
1968  s->encoding_error,
1970  s->pict_type);
1971 
1973  assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1974  s->misc_bits + s->i_tex_bits +
1975  s->p_tex_bits);
1976  flush_put_bits(&s->pb);
1977  s->frame_bits = put_bits_count(&s->pb);
1978 
1979  stuffing_count = ff_vbv_update(s, s->frame_bits);
1980  s->stuffing_bits = 8*stuffing_count;
1981  if (stuffing_count) {
1982  if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
1983  av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
1984  return -1;
1985  }
1986 
1987  switch (s->codec_id) {
1990  while (stuffing_count--) {
1991  put_bits(&s->pb, 8, 0);
1992  }
1993  break;
1994  case AV_CODEC_ID_MPEG4:
1995  put_bits(&s->pb, 16, 0);
1996  put_bits(&s->pb, 16, 0x1C3);
1997  stuffing_count -= 4;
1998  while (stuffing_count--) {
1999  put_bits(&s->pb, 8, 0xFF);
2000  }
2001  break;
2002  default:
2003  av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2004  s->stuffing_bits = 0;
2005  }
2006  flush_put_bits(&s->pb);
2007  s->frame_bits = put_bits_count(&s->pb);
2008  }
2009 
2010  /* update MPEG-1/2 vbv_delay for CBR */
2011  if (avctx->rc_max_rate &&
2013  s->out_format == FMT_MPEG1 &&
2014  90000LL * (avctx->rc_buffer_size - 1) <=
2015  avctx->rc_max_rate * 0xFFFFLL) {
2016  AVCPBProperties *props;
2017  size_t props_size;
2018 
2019  int vbv_delay, min_delay;
2020  double inbits = avctx->rc_max_rate *
2022  int minbits = s->frame_bits - 8 *
2023  (s->vbv_delay_pos - 1);
2024  double bits = s->rc_context.buffer_index + minbits - inbits;
2025  uint8_t *const vbv_delay_ptr = s->pb.buf + s->vbv_delay_pos;
2026 
2027  if (bits < 0)
2029  "Internal error, negative bits\n");
2030 
2031  av_assert1(s->repeat_first_field == 0);
2032 
2033  vbv_delay = bits * 90000 / avctx->rc_max_rate;
2034  min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2035  avctx->rc_max_rate;
2036 
2037  vbv_delay = FFMAX(vbv_delay, min_delay);
2038 
2039  av_assert0(vbv_delay < 0xFFFF);
2040 
2041  vbv_delay_ptr[0] &= 0xF8;
2042  vbv_delay_ptr[0] |= vbv_delay >> 13;
2043  vbv_delay_ptr[1] = vbv_delay >> 5;
2044  vbv_delay_ptr[2] &= 0x07;
2045  vbv_delay_ptr[2] |= vbv_delay << 3;
2046 
2047  props = av_cpb_properties_alloc(&props_size);
2048  if (!props)
2049  return AVERROR(ENOMEM);
2050  props->vbv_delay = vbv_delay * 300;
2051 
2053  (uint8_t*)props, props_size);
2054  if (ret < 0) {
2055  av_freep(&props);
2056  return ret;
2057  }
2058  }
2059  s->total_bits += s->frame_bits;
2060 
2061  pkt->pts = s->cur_pic.ptr->f->pts;
2062  pkt->duration = s->cur_pic.ptr->f->duration;
2063  if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2064  if (!s->cur_pic.ptr->coded_picture_number)
2065  pkt->dts = pkt->pts - s->dts_delta;
2066  else
2067  pkt->dts = s->reordered_pts;
2068  s->reordered_pts = pkt->pts;
2069  } else
2070  pkt->dts = pkt->pts;
2071 
2072  // the no-delay case is handled in generic code
2074  ret = ff_encode_reordered_opaque(avctx, pkt, s->cur_pic.ptr->f);
2075  if (ret < 0)
2076  return ret;
2077  }
2078 
2079  if (s->cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
2081  if (s->mb_info)
2083  } else {
2084  s->frame_bits = 0;
2085  }
2086 
2087  ff_mpv_unref_picture(&s->cur_pic);
2088 
2089  av_assert1((s->frame_bits & 7) == 0);
2090 
2091  pkt->size = s->frame_bits / 8;
2092  *got_packet = !!pkt->size;
2093  return 0;
2094 }
2095 
                                         int n, int threshold)
{
    /* Rate heuristic: if block n contains only scattered +-1 coefficients
     * whose estimated coding cost (tab[] indexed by preceding zero-run) stays
     * below 'threshold', zero the block so it need not be coded at all.
     * A negative threshold additionally allows eliminating the DC coefficient.
     * NOTE(review): the first line of the signature was dropped from this
     * listing — confirm against upstream. */
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    };
    int score = 0;
    int run = 0;
    int i;
    int16_t *block = s->block[n];
    const int last_index = s->block_last_index[n];
    int skip_dc;

    if (threshold < 0) {
        skip_dc = 0;
        threshold = -threshold;
    } else
        skip_dc = 1;

    /* Are all we could set to zero already zero? */
    if (last_index <= skip_dc - 1)
        return;

    /* score the block in scan order; bail out on any |level| > 1 */
    for (i = 0; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        const int level = FFABS(block[j]);
        if (level == 1) {
            if (skip_dc && i == 0)
                continue;
            score += tab[run];
            run = 0;
        } else if (level > 1) {
            return;
        } else {
            run++;
        }
    }
    if (score >= threshold)
        return;
    /* cheap enough to drop: clear every eliminable coefficient */
    for (i = skip_dc; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        block[j] = 0;
    }
    /* DC-only block if the (kept) DC is nonzero, otherwise fully empty */
    if (block[0])
        s->block_last_index[n] = 0;
    else
        s->block_last_index[n] = -1;
}
2151 
2152 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2153  int last_index)
2154 {
2155  int i;
2156  const int maxlevel = s->max_qcoeff;
2157  const int minlevel = s->min_qcoeff;
2158  int overflow = 0;
2159 
2160  if (s->mb_intra) {
2161  i = 1; // skip clipping of intra dc
2162  } else
2163  i = 0;
2164 
2165  for (; i <= last_index; i++) {
2166  const int j = s->intra_scantable.permutated[i];
2167  int level = block[j];
2168 
2169  if (level > maxlevel) {
2170  level = maxlevel;
2171  overflow++;
2172  } else if (level < minlevel) {
2173  level = minlevel;
2174  overflow++;
2175  }
2176 
2177  block[j] = level;
2178  }
2179 
2180  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2181  av_log(s->avctx, AV_LOG_INFO,
2182  "warning, clipping %d dct coefficients to %d..%d\n",
2183  overflow, minlevel, maxlevel);
2184 }
2185 
/**
 * Fill an 8x8 table of noise-shaping weights for one block: each entry is
 * proportional to the local activity (a variance-style measure) of the
 * pixel's 3x3 neighbourhood, clipped at the block borders.
 */
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
{
    // FIXME optimize
    for (int y = 0; y < 8; y++) {
        for (int x = 0; x < 8; x++) {
            const int y0 = y > 0 ? y - 1 : 0;
            const int y1 = y < 6 ? y + 2 : 8;
            const int x0 = x > 0 ? x - 1 : 0;
            const int x1 = x < 6 ? x + 2 : 8;
            int sum = 0, sqr = 0, count = 0;

            for (int yy = y0; yy < y1; yy++) {
                for (int xx = x0; xx < x1; xx++) {
                    const int v = ptr[xx + yy * stride];
                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }
            weight[8 * y + x] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2209 
2211  int motion_x, int motion_y,
2212  int mb_block_height,
2213  int mb_block_width,
2214  int mb_block_count,
2215  int chroma_x_shift,
2216  int chroma_y_shift,
2217  int chroma_format)
2218 {
2219 /* Interlaced DCT is only possible with MPEG-2 and MPEG-4
2220  * and neither of these encoders currently supports 444. */
2221 #define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
2222  (s)->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
2223  int16_t weight[12][64];
2224  int16_t orig[12][64];
2225  const int mb_x = s->mb_x;
2226  const int mb_y = s->mb_y;
2227  int i;
2228  int skip_dct[12];
2229  int dct_offset = s->linesize * 8; // default for progressive frames
2230  int uv_dct_offset = s->uvlinesize * 8;
2231  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2232  ptrdiff_t wrap_y, wrap_c;
2233 
2234  for (i = 0; i < mb_block_count; i++)
2235  skip_dct[i] = s->skipdct;
2236 
2237  if (s->adaptive_quant) {
2238  const int last_qp = s->qscale;
2239  const int mb_xy = mb_x + mb_y * s->mb_stride;
2240 
2241  s->lambda = s->lambda_table[mb_xy];
2242  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
2244 
2245  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2246  s->dquant = s->cur_pic.qscale_table[mb_xy] - last_qp;
2247 
2248  if (s->out_format == FMT_H263) {
2249  s->dquant = av_clip(s->dquant, -2, 2);
2250 
2251  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2252  if (!s->mb_intra) {
2253  if (s->pict_type == AV_PICTURE_TYPE_B) {
2254  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2255  s->dquant = 0;
2256  }
2257  if (s->mv_type == MV_TYPE_8X8)
2258  s->dquant = 0;
2259  }
2260  }
2261  }
2262  }
2263  ff_set_qscale(s, last_qp + s->dquant);
2264  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2265  ff_set_qscale(s, s->qscale + s->dquant);
2266 
2267  wrap_y = s->linesize;
2268  wrap_c = s->uvlinesize;
2269  ptr_y = s->new_pic->data[0] +
2270  (mb_y * 16 * wrap_y) + mb_x * 16;
2271  ptr_cb = s->new_pic->data[1] +
2272  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2273  ptr_cr = s->new_pic->data[2] +
2274  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2275 
2276  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2277  uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2278  int cw = (s->width + chroma_x_shift) >> chroma_x_shift;
2279  int ch = (s->height + chroma_y_shift) >> chroma_y_shift;
2280  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2281  wrap_y, wrap_y,
2282  16, 16, mb_x * 16, mb_y * 16,
2283  s->width, s->height);
2284  ptr_y = ebuf;
2285  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2286  wrap_c, wrap_c,
2287  mb_block_width, mb_block_height,
2288  mb_x * mb_block_width, mb_y * mb_block_height,
2289  cw, ch);
2290  ptr_cb = ebuf + 16 * wrap_y;
2291  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2292  wrap_c, wrap_c,
2293  mb_block_width, mb_block_height,
2294  mb_x * mb_block_width, mb_y * mb_block_height,
2295  cw, ch);
2296  ptr_cr = ebuf + 16 * wrap_y + 16;
2297  }
2298 
2299  if (s->mb_intra) {
2300  if (INTERLACED_DCT(s)) {
2301  int progressive_score, interlaced_score;
2302 
2303  s->interlaced_dct = 0;
2304  progressive_score = s->ildct_cmp[1](s, ptr_y, NULL, wrap_y, 8) +
2305  s->ildct_cmp[1](s, ptr_y + wrap_y * 8,
2306  NULL, wrap_y, 8) - 400;
2307 
2308  if (progressive_score > 0) {
2309  interlaced_score = s->ildct_cmp[1](s, ptr_y,
2310  NULL, wrap_y * 2, 8) +
2311  s->ildct_cmp[1](s, ptr_y + wrap_y,
2312  NULL, wrap_y * 2, 8);
2313  if (progressive_score > interlaced_score) {
2314  s->interlaced_dct = 1;
2315 
2316  dct_offset = wrap_y;
2317  uv_dct_offset = wrap_c;
2318  wrap_y <<= 1;
2319  if (chroma_format == CHROMA_422 ||
2321  wrap_c <<= 1;
2322  }
2323  }
2324  }
2325 
2326  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2327  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2328  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2329  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2330 
2331  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2332  skip_dct[4] = 1;
2333  skip_dct[5] = 1;
2334  } else {
2335  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2336  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2337  if (chroma_format == CHROMA_422) {
2338  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2339  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2340  } else if (chroma_format == CHROMA_444) {
2341  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2342  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2343  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2344  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2345  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2346  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2347  }
2348  }
2349  } else {
2350  op_pixels_func (*op_pix)[4];
2351  qpel_mc_func (*op_qpix)[16];
2352  uint8_t *dest_y, *dest_cb, *dest_cr;
2353 
2354  dest_y = s->dest[0];
2355  dest_cb = s->dest[1];
2356  dest_cr = s->dest[2];
2357 
2358  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2359  op_pix = s->hdsp.put_pixels_tab;
2360  op_qpix = s->qdsp.put_qpel_pixels_tab;
2361  } else {
2362  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2363  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2364  }
2365 
2366  if (s->mv_dir & MV_DIR_FORWARD) {
2367  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2368  s->last_pic.data,
2369  op_pix, op_qpix);
2370  op_pix = s->hdsp.avg_pixels_tab;
2371  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2372  }
2373  if (s->mv_dir & MV_DIR_BACKWARD) {
2374  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2375  s->next_pic.data,
2376  op_pix, op_qpix);
2377  }
2378 
2379  if (INTERLACED_DCT(s)) {
2380  int progressive_score, interlaced_score;
2381 
2382  s->interlaced_dct = 0;
2383  progressive_score = s->ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2384  s->ildct_cmp[0](s, dest_y + wrap_y * 8,
2385  ptr_y + wrap_y * 8,
2386  wrap_y, 8) - 400;
2387 
2388  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2389  progressive_score -= 400;
2390 
2391  if (progressive_score > 0) {
2392  interlaced_score = s->ildct_cmp[0](s, dest_y, ptr_y,
2393  wrap_y * 2, 8) +
2394  s->ildct_cmp[0](s, dest_y + wrap_y,
2395  ptr_y + wrap_y,
2396  wrap_y * 2, 8);
2397 
2398  if (progressive_score > interlaced_score) {
2399  s->interlaced_dct = 1;
2400 
2401  dct_offset = wrap_y;
2402  uv_dct_offset = wrap_c;
2403  wrap_y <<= 1;
2404  if (chroma_format == CHROMA_422)
2405  wrap_c <<= 1;
2406  }
2407  }
2408  }
2409 
2410  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2411  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2412  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2413  dest_y + dct_offset, wrap_y);
2414  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2415  dest_y + dct_offset + 8, wrap_y);
2416 
2417  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2418  skip_dct[4] = 1;
2419  skip_dct[5] = 1;
2420  } else {
2421  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2422  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2423  if (!chroma_y_shift) { /* 422 */
2424  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2425  dest_cb + uv_dct_offset, wrap_c);
2426  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2427  dest_cr + uv_dct_offset, wrap_c);
2428  }
2429  }
2430  /* pre quantization */
2431  if (s->mc_mb_var[s->mb_stride * mb_y + mb_x] < 2 * s->qscale * s->qscale) {
2432  // FIXME optimize
2433  if (s->sad_cmp[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2434  skip_dct[0] = 1;
2435  if (s->sad_cmp[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2436  skip_dct[1] = 1;
2437  if (s->sad_cmp[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2438  wrap_y, 8) < 20 * s->qscale)
2439  skip_dct[2] = 1;
2440  if (s->sad_cmp[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2441  wrap_y, 8) < 20 * s->qscale)
2442  skip_dct[3] = 1;
2443  if (s->sad_cmp[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2444  skip_dct[4] = 1;
2445  if (s->sad_cmp[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2446  skip_dct[5] = 1;
2447  if (!chroma_y_shift) { /* 422 */
2448  if (s->sad_cmp[1](NULL, ptr_cb + uv_dct_offset,
2449  dest_cb + uv_dct_offset,
2450  wrap_c, 8) < 20 * s->qscale)
2451  skip_dct[6] = 1;
2452  if (s->sad_cmp[1](NULL, ptr_cr + uv_dct_offset,
2453  dest_cr + uv_dct_offset,
2454  wrap_c, 8) < 20 * s->qscale)
2455  skip_dct[7] = 1;
2456  }
2457  }
2458  }
2459 
2460  if (s->quantizer_noise_shaping) {
2461  if (!skip_dct[0])
2462  get_visual_weight(weight[0], ptr_y , wrap_y);
2463  if (!skip_dct[1])
2464  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2465  if (!skip_dct[2])
2466  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2467  if (!skip_dct[3])
2468  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2469  if (!skip_dct[4])
2470  get_visual_weight(weight[4], ptr_cb , wrap_c);
2471  if (!skip_dct[5])
2472  get_visual_weight(weight[5], ptr_cr , wrap_c);
2473  if (!chroma_y_shift) { /* 422 */
2474  if (!skip_dct[6])
2475  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2476  wrap_c);
2477  if (!skip_dct[7])
2478  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2479  wrap_c);
2480  }
2481  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2482  }
2483 
2484  /* DCT & quantize */
2485  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2486  {
2487  for (i = 0; i < mb_block_count; i++) {
2488  if (!skip_dct[i]) {
2489  int overflow;
2490  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2491  // FIXME we could decide to change to quantizer instead of
2492  // clipping
2493  // JS: I don't think that would be a good idea it could lower
2494  // quality instead of improve it. Just INTRADC clipping
2495  // deserves changes in quantizer
2496  if (overflow)
2497  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2498  } else
2499  s->block_last_index[i] = -1;
2500  }
2501  if (s->quantizer_noise_shaping) {
2502  for (i = 0; i < mb_block_count; i++) {
2503  if (!skip_dct[i]) {
2504  s->block_last_index[i] =
2505  dct_quantize_refine(s, s->block[i], weight[i],
2506  orig[i], i, s->qscale);
2507  }
2508  }
2509  }
2510 
2511  if (s->luma_elim_threshold && !s->mb_intra)
2512  for (i = 0; i < 4; i++)
2513  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2514  if (s->chroma_elim_threshold && !s->mb_intra)
2515  for (i = 4; i < mb_block_count; i++)
2516  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2517 
2518  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2519  for (i = 0; i < mb_block_count; i++) {
2520  if (s->block_last_index[i] == -1)
2521  s->coded_score[i] = INT_MAX / 256;
2522  }
2523  }
2524  }
2525 
2526  if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2527  s->block_last_index[4] =
2528  s->block_last_index[5] = 0;
2529  s->block[4][0] =
2530  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2531  if (!chroma_y_shift) { /* 422 / 444 */
2532  for (i=6; i<12; i++) {
2533  s->block_last_index[i] = 0;
2534  s->block[i][0] = s->block[4][0];
2535  }
2536  }
2537  }
2538 
2539  // non c quantize code returns incorrect block_last_index FIXME
2540  if (s->alternate_scan && s->dct_quantize != dct_quantize_c) {
2541  for (i = 0; i < mb_block_count; i++) {
2542  int j;
2543  if (s->block_last_index[i] > 0) {
2544  for (j = 63; j > 0; j--) {
2545  if (s->block[i][s->intra_scantable.permutated[j]])
2546  break;
2547  }
2548  s->block_last_index[i] = j;
2549  }
2550  }
2551  }
2552 
2553  /* huffman encode */
2554  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2557  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2558  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2559  break;
2560  case AV_CODEC_ID_MPEG4:
2561  if (CONFIG_MPEG4_ENCODER)
2562  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2563  break;
2564  case AV_CODEC_ID_MSMPEG4V2:
2565  case AV_CODEC_ID_MSMPEG4V3:
2566  case AV_CODEC_ID_WMV1:
2567  if (CONFIG_MSMPEG4ENC)
2568  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2569  break;
2570  case AV_CODEC_ID_WMV2:
2571  if (CONFIG_WMV2_ENCODER)
2572  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2573  break;
2574  case AV_CODEC_ID_H261:
2575  if (CONFIG_H261_ENCODER)
2576  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2577  break;
2578  case AV_CODEC_ID_H263:
2579  case AV_CODEC_ID_H263P:
2580  case AV_CODEC_ID_FLV1:
2581  case AV_CODEC_ID_RV10:
2582  case AV_CODEC_ID_RV20:
2583  if (CONFIG_H263_ENCODER)
2584  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2585  break;
2586 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
2587  case AV_CODEC_ID_MJPEG:
2588  case AV_CODEC_ID_AMV:
2589  ff_mjpeg_encode_mb(s, s->block);
2590  break;
2591 #endif
2592  case AV_CODEC_ID_SPEEDHQ:
2593  if (CONFIG_SPEEDHQ_ENCODER)
2594  ff_speedhq_encode_mb(s, s->block);
2595  break;
2596  default:
2597  av_assert1(0);
2598  }
2599 }
2600 
2601 static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2602 {
2603  if (s->chroma_format == CHROMA_420)
2604  encode_mb_internal(s, motion_x, motion_y, 8, 8, 6, 1, 1, CHROMA_420);
2605  else if (s->chroma_format == CHROMA_422)
2606  encode_mb_internal(s, motion_x, motion_y, 16, 8, 8, 1, 0, CHROMA_422);
2607  else
2608  encode_mb_internal(s, motion_x, motion_y, 16, 16, 12, 0, 0, CHROMA_444);
2609 }
2610 
                                       const MpegEncContext *s)
{
    int i;

    /* Snapshot the encoder state that encode_mb() mutates so a candidate
     * encoding can later be re-tried from the same starting point. */
    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* MPEG-1 */
    d->mb_skip_run= s->mb_skip_run;
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];

    /* statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->misc_bits= s->misc_bits;
    d->last_bits= 0;

    /* reset per-candidate state rather than copying it */
    d->mb_skipped= 0;
    d->qscale= s->qscale;
    d->dquant= s->dquant;

    d->esc3_level_length= s->esc3_level_length;
}
2637 
                                      const MpegEncContext *s)
{
    int i;

    /* Adopt the results of the just-encoded candidate: motion vectors and
     * the MV prediction state. */
    memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* MPEG-1 */
    d->mb_skip_run= s->mb_skip_run;
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];

    /* statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->misc_bits= s->misc_bits;

    /* macroblock coding decision */
    d->mb_intra= s->mb_intra;
    d->mb_skipped= s->mb_skipped;
    d->mv_type= s->mv_type;
    d->mv_dir= s->mv_dir;
    /* the winning bitstream writer(s) */
    d->pb= s->pb;
    if(s->data_partitioning){
        d->pb2= s->pb2;
        d->tex_pb= s->tex_pb;
    }
    /* coefficient blocks and their last significant positions */
    d->block= s->block;
    for(i=0; i<8; i++)
        d->block_last_index[i]= s->block_last_index[i];
    d->interlaced_dct= s->interlaced_dct;
    d->qscale= s->qscale;

    d->esc3_level_length= s->esc3_level_length;
}
2675 
                          int *dmin, int *next_block, int motion_x, int motion_y)
{
    int score;
    uint8_t *dest_backup[3];

    /* Encode the current candidate into one half of a double buffer of
     * bitstreams/blocks; keep it only if it beats the best score so far. */
    copy_context_before_encode(s, backup);

    s->block= s->blocks[*next_block];
    s->pb= pb[*next_block];
    if(s->data_partitioning){
        s->pb2 = pb2 [*next_block];
        s->tex_pb= tex_pb[*next_block];
    }

    if(*next_block){
        /* Redirect reconstruction into the scratchpad so the pixels of the
         * currently-best candidate in s->dest are not overwritten. */
        memcpy(dest_backup, s->dest, sizeof(s->dest));
        s->dest[0] = s->sc.rd_scratchpad;
        s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
        s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
        av_assert0(s->linesize >= 32); //FIXME
    }

    encode_mb(s, motion_x, motion_y);

    /* rate: bits produced for this macroblock (all partitions) */
    score= put_bits_count(&s->pb);
    if(s->data_partitioning){
        score+= put_bits_count(&s->pb2);
        score+= put_bits_count(&s->tex_pb);
    }

    if(s->avctx->mb_decision == FF_MB_DECISION_RD){
        /* full rate-distortion: reconstruct and add lambda-weighted SSE */
        mpv_reconstruct_mb(s, s->block);

        score *= s->lambda2;
        score += sse_mb(s) << FF_LAMBDA_SHIFT;
    }

    if(*next_block){
        memcpy(s->dest, dest_backup, sizeof(s->dest));
    }

    if(score<*dmin){
        /* new best candidate: remember its score and flip the buffer */
        *dmin= score;
        *next_block^=1;
        /* NOTE(review): upstream also snapshots the winning context here;
         * that call is not visible in this extract -- confirm. */
    }
}
2726 
2727 static int sse(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride){
2728  const uint32_t *sq = ff_square_tab + 256;
2729  int acc=0;
2730  int x,y;
2731 
2732  if(w==16 && h==16)
2733  return s->sse_cmp[0](NULL, src1, src2, stride, 16);
2734  else if(w==8 && h==8)
2735  return s->sse_cmp[1](NULL, src1, src2, stride, 8);
2736 
2737  for(y=0; y<h; y++){
2738  for(x=0; x<w; x++){
2739  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2740  }
2741  }
2742 
2743  av_assert2(acc>=0);
2744 
2745  return acc;
2746 }
2747 
2748 static int sse_mb(MpegEncContext *s){
2749  int w= 16;
2750  int h= 16;
2751  int chroma_mb_w = w >> s->chroma_x_shift;
2752  int chroma_mb_h = h >> s->chroma_y_shift;
2753 
2754  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2755  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2756 
2757  if(w==16 && h==16)
2758  return s->n_sse_cmp[0](s, s->new_pic->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
2759  s->dest[0], s->linesize, 16) +
2760  s->n_sse_cmp[1](s, s->new_pic->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2761  s->dest[1], s->uvlinesize, chroma_mb_h) +
2762  s->n_sse_cmp[1](s, s->new_pic->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2763  s->dest[2], s->uvlinesize, chroma_mb_h);
2764  else
2765  return sse(s, s->new_pic->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16,
2766  s->dest[0], w, h, s->linesize) +
2767  sse(s, s->new_pic->data[1] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2768  s->dest[1], w >> s->chroma_x_shift, h >> s->chroma_y_shift, s->uvlinesize) +
2769  sse(s, s->new_pic->data[2] + s->mb_x * chroma_mb_w + s->mb_y * s->uvlinesize * chroma_mb_h,
2770  s->dest[2], w >> s->chroma_x_shift, h >> s->chroma_y_shift, s->uvlinesize);
2771 }
2772 
    MpegEncContext *s= *(void**)arg;

    /* Motion-estimation pre-pass over this slice context's macroblock rows,
     * scanned bottom-up and right-to-left. */
    s->me.pre_pass=1;
    s->me.dia_size= s->avctx->pre_dia_size;
    s->first_slice_line=1;
    for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
        for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
            ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
        }
        s->first_slice_line=0;
    }

    s->me.pre_pass=0;

    return 0;
}
2791 
    MpegEncContext *s= *(void**)arg;

    /* Main motion-estimation pass over this slice context's rows. */
    s->me.dia_size= s->avctx->dia_size;
    s->first_slice_line=1;
    for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x=0; //for block init below
        /* NOTE(review): the per-row block-index initialization call is not
         * visible in this extract -- confirm against upstream. */
        for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
            /* advance the four luma block indices one macroblock right */
            s->block_index[0]+=2;
            s->block_index[1]+=2;
            s->block_index[2]+=2;
            s->block_index[3]+=2;

            /* compute motion vector & mb_type and store in context */
            if(s->pict_type==AV_PICTURE_TYPE_B)
                ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
            else
                ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
        }
        s->first_slice_line=0;
    }
    return 0;
}
2816 
2817 static int mb_var_thread(AVCodecContext *c, void *arg){
2818  MpegEncContext *s= *(void**)arg;
2819  int mb_x, mb_y;
2820 
2821  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2822  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2823  int xx = mb_x * 16;
2824  int yy = mb_y * 16;
2825  const uint8_t *pix = s->new_pic->data[0] + (yy * s->linesize) + xx;
2826  int varc;
2827  int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2828 
2829  varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2830  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2831 
2832  s->mb_var [s->mb_stride * mb_y + mb_x] = varc;
2833  s->mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2834  s->me.mb_var_sum_temp += varc;
2835  }
2836  }
2837  return 0;
2838 }
2839 
    if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
        if(s->partitioned_frame){
            /* NOTE(review): the partitioned-frame end handling is not
             * visible in this extract -- confirm against upstream. */
        }

        ff_mpeg4_stuffing(&s->pb);
    } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
               s->out_format == FMT_MJPEG) {
        /* NOTE(review): MJPEG stuffing/trailer call not visible here. */
    } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
        /* NOTE(review): SpeedHQ slice-end call not visible here. */
    }

    /* byte-align the bitstream at the slice boundary */
    flush_put_bits(&s->pb);

    if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
        s->misc_bits+= get_bits_diff(s);
}
2859 
{
    /* Fill the most recently reserved 12-byte mb_info slot with the bit
     * offset and decoding state of the current macroblock (presumably
     * exported as H.263 MB info side data -- confirm against callers). */
    uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
    int offset = put_bits_count(&s->pb);
    int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
    int gobn = s->mb_y / s->gob_index;
    int pred_x, pred_y;
    if (CONFIG_H263_ENCODER)
        ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
    bytestream_put_le32(&ptr, offset);
    bytestream_put_byte(&ptr, s->qscale);
    bytestream_put_byte(&ptr, gobn);
    bytestream_put_le16(&ptr, mba);
    bytestream_put_byte(&ptr, pred_x); /* hmv1 */
    bytestream_put_byte(&ptr, pred_y); /* vmv1 */
    /* 4MV not implemented */
    bytestream_put_byte(&ptr, 0); /* hmv2 */
    bytestream_put_byte(&ptr, 0); /* vmv2 */
}
2879 
2880 static void update_mb_info(MpegEncContext *s, int startcode)
2881 {
2882  if (!s->mb_info)
2883  return;
2884  if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2885  s->mb_info_size += 12;
2886  s->prev_mb_info = s->last_mb_info;
2887  }
2888  if (startcode) {
2889  s->prev_mb_info = put_bytes_count(&s->pb, 0);
2890  /* This might have incremented mb_info_size above, and we return without
2891  * actually writing any info into that slot yet. But in that case,
2892  * this will be called again at the start of the after writing the
2893  * start code, actually writing the mb info. */
2894  return;
2895  }
2896 
2897  s->last_mb_info = put_bytes_count(&s->pb, 0);
2898  if (!s->mb_info_size)
2899  s->mb_info_size += 12;
2900  write_mb_info(s);
2901 }
2902 
2903 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2904 {
2905  if (put_bytes_left(&s->pb, 0) < threshold
2906  && s->slice_context_count == 1
2907  && s->pb.buf == s->avctx->internal->byte_buffer) {
2908  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2909 
2910  uint8_t *new_buffer = NULL;
2911  int new_buffer_size = 0;
2912 
2913  if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2914  av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2915  return AVERROR(ENOMEM);
2916  }
2917 
2918  emms_c();
2919 
2920  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2921  s->avctx->internal->byte_buffer_size + size_increase);
2922  if (!new_buffer)
2923  return AVERROR(ENOMEM);
2924 
2925  memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2926  av_free(s->avctx->internal->byte_buffer);
2927  s->avctx->internal->byte_buffer = new_buffer;
2928  s->avctx->internal->byte_buffer_size = new_buffer_size;
2929  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2930  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2931  }
2932  if (put_bytes_left(&s->pb, 0) < threshold)
2933  return AVERROR(EINVAL);
2934  return 0;
2935 }
2936 
2937 static int encode_thread(AVCodecContext *c, void *arg){
2938  MpegEncContext *s= *(void**)arg;
2939  int mb_x, mb_y, mb_y_order;
2940  int chr_h= 16>>s->chroma_y_shift;
2941  int i, j;
2942  MpegEncContext best_s = { 0 }, backup_s;
2943  uint8_t bit_buf[2][MAX_MB_BYTES];
2944  uint8_t bit_buf2[2][MAX_MB_BYTES];
2945  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2946  PutBitContext pb[2], pb2[2], tex_pb[2];
2947 
2948  for(i=0; i<2; i++){
2949  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2950  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2951  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2952  }
2953 
2954  s->last_bits= put_bits_count(&s->pb);
2955  s->mv_bits=0;
2956  s->misc_bits=0;
2957  s->i_tex_bits=0;
2958  s->p_tex_bits=0;
2959  s->i_count=0;
2960 
2961  for(i=0; i<3; i++){
2962  /* init last dc values */
2963  /* note: quant matrix value (8) is implied here */
2964  s->last_dc[i] = 128 << s->intra_dc_precision;
2965 
2966  s->encoding_error[i] = 0;
2967  }
2968  if(s->codec_id==AV_CODEC_ID_AMV){
2969  s->last_dc[0] = 128*8/13;
2970  s->last_dc[1] = 128*8/14;
2971  s->last_dc[2] = 128*8/14;
2972  }
2973  s->mb_skip_run = 0;
2974  memset(s->last_mv, 0, sizeof(s->last_mv));
2975 
2976  s->last_mv_dir = 0;
2977 
2978  switch(s->codec_id){
2979  case AV_CODEC_ID_H263:
2980  case AV_CODEC_ID_H263P:
2981  case AV_CODEC_ID_FLV1:
2982  if (CONFIG_H263_ENCODER)
2983  s->gob_index = H263_GOB_HEIGHT(s->height);
2984  break;
2985  case AV_CODEC_ID_MPEG4:
2986  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2988  break;
2989  }
2990 
2991  s->resync_mb_x=0;
2992  s->resync_mb_y=0;
2993  s->first_slice_line = 1;
2994  s->ptr_lastgob = s->pb.buf;
2995  for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
2996  if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
2997  int first_in_slice;
2998  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
2999  if (first_in_slice && mb_y_order != s->start_mb_y)
3001  s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
3002  } else {
3003  mb_y = mb_y_order;
3004  }
3005  s->mb_x=0;
3006  s->mb_y= mb_y;
3007 
3008  ff_set_qscale(s, s->qscale);
3010 
3011  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3012  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3013  int mb_type= s->mb_type[xy];
3014 // int d;
3015  int dmin= INT_MAX;
3016  int dir;
3017  int size_increase = s->avctx->internal->byte_buffer_size/4
3018  + s->mb_width*MAX_MB_BYTES;
3019 
3021  if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
3022  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3023  return -1;
3024  }
3025  if(s->data_partitioning){
3026  if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
3027  put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
3028  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3029  return -1;
3030  }
3031  }
3032 
3033  s->mb_x = mb_x;
3034  s->mb_y = mb_y; // moved into loop, can get changed by H.261
3035  ff_update_block_index(s, 8, 0, s->chroma_x_shift);
3036 
3037  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3039  xy= s->mb_y*s->mb_stride + s->mb_x;
3040  mb_type= s->mb_type[xy];
3041  }
3042 
3043  /* write gob / video packet header */
3044  if(s->rtp_mode){
3045  int current_packet_size, is_gob_start;
3046 
3047  current_packet_size = put_bytes_count(&s->pb, 1)
3048  - (s->ptr_lastgob - s->pb.buf);
3049 
3050  is_gob_start = s->rtp_payload_size &&
3051  current_packet_size >= s->rtp_payload_size &&
3052  mb_y + mb_x > 0;
3053 
3054  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3055 
3056  switch(s->codec_id){
3057  case AV_CODEC_ID_H263:
3058  case AV_CODEC_ID_H263P:
3059  if(!s->h263_slice_structured)
3060  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3061  break;
3063  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3065  if(s->mb_skip_run) is_gob_start=0;
3066  break;
3067  case AV_CODEC_ID_MJPEG:
3068  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3069  break;
3070  }
3071 
3072  if(is_gob_start){
3073  if(s->start_mb_y != mb_y || mb_x!=0){
3074  write_slice_end(s);
3075 
3076  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3078  }
3079  }
3080 
3081  av_assert2((put_bits_count(&s->pb)&7) == 0);
3082  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3083 
3084  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3085  int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->mb_x + s->mb_y;
3086  int d = 100 / s->error_rate;
3087  if(r % d == 0){
3088  current_packet_size=0;
3089  s->pb.buf_ptr= s->ptr_lastgob;
3090  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3091  }
3092  }
3093 
3094  switch(s->codec_id){
3095  case AV_CODEC_ID_MPEG4:
3096  if (CONFIG_MPEG4_ENCODER) {
3099  }
3100  break;
3103  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3106  }
3107  break;
3108  case AV_CODEC_ID_H263:
3109  case AV_CODEC_ID_H263P:
3110  if (CONFIG_H263_ENCODER) {
3111  update_mb_info(s, 1);
3113  }
3114  break;
3115  }
3116 
3117  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3118  int bits= put_bits_count(&s->pb);
3119  s->misc_bits+= bits - s->last_bits;
3120  s->last_bits= bits;
3121  }
3122 
3123  s->ptr_lastgob += current_packet_size;
3124  s->first_slice_line=1;
3125  s->resync_mb_x=mb_x;
3126  s->resync_mb_y=mb_y;
3127  }
3128  }
3129 
3130  if( (s->resync_mb_x == s->mb_x)
3131  && s->resync_mb_y+1 == s->mb_y){
3132  s->first_slice_line=0;
3133  }
3134 
3135  s->mb_skipped=0;
3136  s->dquant=0; //only for QP_RD
3137 
3138  update_mb_info(s, 0);
3139 
3140  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3141  int next_block=0;
3142  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3143 
3144  copy_context_before_encode(&backup_s, s);
3145  backup_s.pb= s->pb;
3146  best_s.data_partitioning= s->data_partitioning;
3147  best_s.partitioned_frame= s->partitioned_frame;
3148  if(s->data_partitioning){
3149  backup_s.pb2= s->pb2;
3150  backup_s.tex_pb= s->tex_pb;
3151  }
3152 
3154  s->mv_dir = MV_DIR_FORWARD;
3155  s->mv_type = MV_TYPE_16X16;
3156  s->mb_intra= 0;
3157  s->mv[0][0][0] = s->p_mv_table[xy][0];
3158  s->mv[0][0][1] = s->p_mv_table[xy][1];
3159  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3160  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3161  }
3163  s->mv_dir = MV_DIR_FORWARD;
3164  s->mv_type = MV_TYPE_FIELD;
3165  s->mb_intra= 0;
3166  for(i=0; i<2; i++){
3167  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3168  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3169  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3170  }
3171  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3172  &dmin, &next_block, 0, 0);
3173  }
3175  s->mv_dir = MV_DIR_FORWARD;
3176  s->mv_type = MV_TYPE_16X16;
3177  s->mb_intra= 0;
3178  s->mv[0][0][0] = 0;
3179  s->mv[0][0][1] = 0;
3180  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3181  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3182  }
3184  s->mv_dir = MV_DIR_FORWARD;
3185  s->mv_type = MV_TYPE_8X8;
3186  s->mb_intra= 0;
3187  for(i=0; i<4; i++){
3188  s->mv[0][i][0] = s->cur_pic.motion_val[0][s->block_index[i]][0];
3189  s->mv[0][i][1] = s->cur_pic.motion_val[0][s->block_index[i]][1];
3190  }
3191  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3192  &dmin, &next_block, 0, 0);
3193  }
3195  s->mv_dir = MV_DIR_FORWARD;
3196  s->mv_type = MV_TYPE_16X16;
3197  s->mb_intra= 0;
3198  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3199  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3200  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3201  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3202  }
3204  s->mv_dir = MV_DIR_BACKWARD;
3205  s->mv_type = MV_TYPE_16X16;
3206  s->mb_intra= 0;
3207  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3208  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3209  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3210  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3211  }
3213  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3214  s->mv_type = MV_TYPE_16X16;
3215  s->mb_intra= 0;
3216  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3217  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3218  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3219  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3220  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3221  &dmin, &next_block, 0, 0);
3222  }
3224  s->mv_dir = MV_DIR_FORWARD;
3225  s->mv_type = MV_TYPE_FIELD;
3226  s->mb_intra= 0;
3227  for(i=0; i<2; i++){
3228  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3229  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3230  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3231  }
3232  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3233  &dmin, &next_block, 0, 0);
3234  }
3236  s->mv_dir = MV_DIR_BACKWARD;
3237  s->mv_type = MV_TYPE_FIELD;
3238  s->mb_intra= 0;
3239  for(i=0; i<2; i++){
3240  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3241  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3242  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3243  }
3244  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3245  &dmin, &next_block, 0, 0);
3246  }
3248  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3249  s->mv_type = MV_TYPE_FIELD;
3250  s->mb_intra= 0;
3251  for(dir=0; dir<2; dir++){
3252  for(i=0; i<2; i++){
3253  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3254  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3255  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3256  }
3257  }
3258  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3259  &dmin, &next_block, 0, 0);
3260  }
3262  s->mv_dir = 0;
3263  s->mv_type = MV_TYPE_16X16;
3264  s->mb_intra= 1;
3265  s->mv[0][0][0] = 0;
3266  s->mv[0][0][1] = 0;
3267  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3268  &dmin, &next_block, 0, 0);
3269  s->mbintra_table[xy] = 1;
3270  }
3271 
3272  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3273  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3274  const int last_qp= backup_s.qscale;
3275  int qpi, qp, dc[6];
3276  int16_t ac[6][16];
3277  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3278  static const int dquant_tab[4]={-1,1,-2,2};
3279  int storecoefs = s->mb_intra && s->dc_val[0];
3280 
3281  av_assert2(backup_s.dquant == 0);
3282 
3283  //FIXME intra
3284  s->mv_dir= best_s.mv_dir;
3285  s->mv_type = MV_TYPE_16X16;
3286  s->mb_intra= best_s.mb_intra;
3287  s->mv[0][0][0] = best_s.mv[0][0][0];
3288  s->mv[0][0][1] = best_s.mv[0][0][1];
3289  s->mv[1][0][0] = best_s.mv[1][0][0];
3290  s->mv[1][0][1] = best_s.mv[1][0][1];
3291 
3292  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3293  for(; qpi<4; qpi++){
3294  int dquant= dquant_tab[qpi];
3295  qp= last_qp + dquant;
3296  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3297  continue;
3298  backup_s.dquant= dquant;
3299  if(storecoefs){
3300  for(i=0; i<6; i++){
3301  dc[i]= s->dc_val[0][ s->block_index[i] ];
3302  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3303  }
3304  }
3305 
3306  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3307  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3308  if(best_s.qscale != qp){
3309  if(storecoefs){
3310  for(i=0; i<6; i++){
3311  s->dc_val[0][ s->block_index[i] ]= dc[i];
3312  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3313  }
3314  }
3315  }
3316  }
3317  }
3318  }
3319  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3320  int mx= s->b_direct_mv_table[xy][0];
3321  int my= s->b_direct_mv_table[xy][1];
3322 
3323  backup_s.dquant = 0;
3324  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3325  s->mb_intra= 0;
3327  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3328  &dmin, &next_block, mx, my);
3329  }
3330  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3331  backup_s.dquant = 0;
3332  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3333  s->mb_intra= 0;
3334  ff_mpeg4_set_direct_mv(s, 0, 0);
3335  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3336  &dmin, &next_block, 0, 0);
3337  }
3338  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3339  int coded=0;
3340  for(i=0; i<6; i++)
3341  coded |= s->block_last_index[i];
3342  if(coded){
3343  int mx,my;
3344  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3345  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3346  mx=my=0; //FIXME find the one we actually used
3348  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3349  mx= s->mv[1][0][0];
3350  my= s->mv[1][0][1];
3351  }else{
3352  mx= s->mv[0][0][0];
3353  my= s->mv[0][0][1];
3354  }
3355 
3356  s->mv_dir= best_s.mv_dir;
3357  s->mv_type = best_s.mv_type;
3358  s->mb_intra= 0;
3359 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3360  s->mv[0][0][1] = best_s.mv[0][0][1];
3361  s->mv[1][0][0] = best_s.mv[1][0][0];
3362  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3363  backup_s.dquant= 0;
3364  s->skipdct=1;
3365  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3366  &dmin, &next_block, mx, my);
3367  s->skipdct=0;
3368  }
3369  }
3370 
3371  copy_context_after_encode(s, &best_s);
3372 
3373  pb_bits_count= put_bits_count(&s->pb);
3374  flush_put_bits(&s->pb);
3375  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3376  s->pb= backup_s.pb;
3377 
3378  if(s->data_partitioning){
3379  pb2_bits_count= put_bits_count(&s->pb2);
3380  flush_put_bits(&s->pb2);
3381  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3382  s->pb2= backup_s.pb2;
3383 
3384  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3385  flush_put_bits(&s->tex_pb);
3386  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3387  s->tex_pb= backup_s.tex_pb;
3388  }
3389  s->last_bits= put_bits_count(&s->pb);
3390 
3391  if (CONFIG_H263_ENCODER &&
3392  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3394 
3395  if(next_block==0){ //FIXME 16 vs linesize16
3396  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3397  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3398  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3399  }
3400 
3401  if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3402  mpv_reconstruct_mb(s, s->block);
3403  } else {
3404  int motion_x = 0, motion_y = 0;
3405  s->mv_type=MV_TYPE_16X16;
3406  // only one MB-Type possible
3407 
3408  switch(mb_type){
3410  s->mv_dir = 0;
3411  s->mb_intra= 1;
3412  motion_x= s->mv[0][0][0] = 0;
3413  motion_y= s->mv[0][0][1] = 0;
3414  s->mbintra_table[xy] = 1;
3415  break;
3417  s->mv_dir = MV_DIR_FORWARD;
3418  s->mb_intra= 0;
3419  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3420  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3421  break;
3423  s->mv_dir = MV_DIR_FORWARD;
3424  s->mv_type = MV_TYPE_FIELD;
3425  s->mb_intra= 0;
3426  for(i=0; i<2; i++){
3427  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3428  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3429  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3430  }
3431  break;
3433  s->mv_dir = MV_DIR_FORWARD;
3434  s->mv_type = MV_TYPE_8X8;
3435  s->mb_intra= 0;
3436  for(i=0; i<4; i++){
3437  s->mv[0][i][0] = s->cur_pic.motion_val[0][s->block_index[i]][0];
3438  s->mv[0][i][1] = s->cur_pic.motion_val[0][s->block_index[i]][1];
3439  }
3440  break;
3442  if (CONFIG_MPEG4_ENCODER) {
3444  s->mb_intra= 0;
3445  motion_x=s->b_direct_mv_table[xy][0];
3446  motion_y=s->b_direct_mv_table[xy][1];
3447  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3448  }
3449  break;
3451  if (CONFIG_MPEG4_ENCODER) {
3453  s->mb_intra= 0;
3454  ff_mpeg4_set_direct_mv(s, 0, 0);
3455  }
3456  break;
3458  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3459  s->mb_intra= 0;
3460  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3461  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3462  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3463  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3464  break;
3466  s->mv_dir = MV_DIR_BACKWARD;
3467  s->mb_intra= 0;
3468  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3469  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3470  break;
3472  s->mv_dir = MV_DIR_FORWARD;
3473  s->mb_intra= 0;
3474  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3475  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3476  break;
3478  s->mv_dir = MV_DIR_FORWARD;
3479  s->mv_type = MV_TYPE_FIELD;
3480  s->mb_intra= 0;
3481  for(i=0; i<2; i++){
3482  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3483  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3484  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3485  }
3486  break;
3488  s->mv_dir = MV_DIR_BACKWARD;
3489  s->mv_type = MV_TYPE_FIELD;
3490  s->mb_intra= 0;
3491  for(i=0; i<2; i++){
3492  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3493  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3494  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3495  }
3496  break;
3498  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3499  s->mv_type = MV_TYPE_FIELD;
3500  s->mb_intra= 0;
3501  for(dir=0; dir<2; dir++){
3502  for(i=0; i<2; i++){
3503  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3504  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3505  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3506  }
3507  }
3508  break;
3509  default:
3510  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3511  }
3512 
3513  encode_mb(s, motion_x, motion_y);
3514 
3515  // RAL: Update last macroblock type
3516  s->last_mv_dir = s->mv_dir;
3517 
3518  if (CONFIG_H263_ENCODER &&
3519  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3521 
3522  mpv_reconstruct_mb(s, s->block);
3523  }
3524 
3525  s->cur_pic.qscale_table[xy] = s->qscale;
3526 
3527  /* clean the MV table in IPS frames for direct mode in B-frames */
3528  if(s->mb_intra /* && I,P,S_TYPE */){
3529  s->p_mv_table[xy][0]=0;
3530  s->p_mv_table[xy][1]=0;
3531  } else if ((s->h263_pred || s->h263_aic) && s->mbintra_table[xy])
3533 
3534  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3535  int w= 16;
3536  int h= 16;
3537 
3538  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3539  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3540 
3541  s->encoding_error[0] += sse(
3542  s, s->new_pic->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3543  s->dest[0], w, h, s->linesize);
3544  s->encoding_error[1] += sse(
3545  s, s->new_pic->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3546  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3547  s->encoding_error[2] += sse(
3548  s, s->new_pic->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3549  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3550  }
3551  if(s->loop_filter){
3552  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3554  }
3555  ff_dlog(s->avctx, "MB %d %d bits\n",
3556  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3557  }
3558  }
3559 
3560 #if CONFIG_MSMPEG4ENC
3561  //not beautiful here but we must write it before flushing so it has to be here
3562  if (s->msmpeg4_version != MSMP4_UNUSED && s->msmpeg4_version < MSMP4_WMV1 &&
3563  s->pict_type == AV_PICTURE_TYPE_I)
3565 #endif
3566 
3567  write_slice_end(s);
3568 
3569  return 0;
3570 }
3571 
/* Fold a per-slice-context statistic into the destination context and zero the
 * source copy (so a later merge of the same source cannot add it twice).
 * NOTE(review): this is a multi-statement macro without do { } while (0)
 * wrapping — unsafe inside an unbraced if/else; all visible call sites use it
 * as a bare statement, so it is OK here, but confirm before reusing. */
3572 #define MERGE(field) dst->field += src->field; src->field=0
3574  MERGE(me.scene_change_score);
3575  MERGE(me.mc_mb_var_sum_temp);
3576  MERGE(me.mb_var_sum_temp);
3577 }
3578 
3580  int i;
3581 
3582  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3583  MERGE(dct_count[1]);
3584  MERGE(mv_bits);
3585  MERGE(i_tex_bits);
3586  MERGE(p_tex_bits);
3587  MERGE(i_count);
3588  MERGE(misc_bits);
3589  MERGE(encoding_error[0]);
3590  MERGE(encoding_error[1]);
3591  MERGE(encoding_error[2]);
3592 
3593  if (dst->noise_reduction){
3594  for(i=0; i<64; i++){
3595  MERGE(dct_error_sum[0][i]);
3596  MERGE(dct_error_sum[1][i]);
3597  }
3598  }
3599 
3600  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3601  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3602  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3603  flush_put_bits(&dst->pb);
3604 }
3605 
/* Pick the quantiser for the current picture.
 * A pending s->next_lambda (one-shot, cleared unless dry_run) takes priority;
 * otherwise, when qscale is not fixed, ask the rate controller.
 * Returns 0 on success, -1 if the rate controller produced a negative quality.
 * NOTE(review): this extraction dropped the statements between original lines
 * 3622/3624 and 3628/3630 (the bodies of the encoder-specific branches in the
 * switch below) — confirm against the real source before relying on them. */
3606 static int estimate_qp(MpegEncContext *s, int dry_run){
3607  if (s->next_lambda){
3608  s->cur_pic.ptr->f->quality = s->next_lambda;
3609  if(!dry_run) s->next_lambda= 0;
3610  } else if (!s->fixed_qscale) {
3611  int quality = ff_rate_estimate_qscale(s, dry_run);
3612  s->cur_pic.ptr->f->quality = quality;
3613  if (s->cur_pic.ptr->f->quality < 0)
3614  return -1;
3615  }
3616 
/* Adaptive quant: per-MB qscale table, then codec-specific cleanup (the
 * cleanup calls are missing from this extraction — see note above). */
3617  if(s->adaptive_quant){
3618  init_qscale_tab(s);
3619 
3620  switch(s->codec_id){
3621  case AV_CODEC_ID_MPEG4:
3622  if (CONFIG_MPEG4_ENCODER)
3624  break;
3625  case AV_CODEC_ID_H263:
3626  case AV_CODEC_ID_H263P:
3627  case AV_CODEC_ID_FLV1:
3628  if (CONFIG_H263_ENCODER)
3630  break;
3631  }
3632 
/* lambda for the whole frame comes from the first table entry here. */
3633  s->lambda= s->lambda_table[0];
3634  //FIXME broken
3635  }else
3636  s->lambda = s->cur_pic.ptr->f->quality;
3637  update_qscale(s);
3638  return 0;
3639 }
3640 
3641 /* must be called before writing the header */
3643  av_assert1(s->cur_pic.ptr->f->pts != AV_NOPTS_VALUE);
3644  s->time = s->cur_pic.ptr->f->pts * s->avctx->time_base.num;
3645 
3646  if(s->pict_type==AV_PICTURE_TYPE_B){
3647  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3648  av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3649  }else{
3650  s->pp_time= s->time - s->last_non_b_time;
3651  s->last_non_b_time= s->time;
3652  av_assert1(s->picture_number==0 || s->pp_time > 0);
3653  }
3654 }
3655 
3657 {
3658  int i, ret;
3659  int bits;
3660  int context_count = s->slice_context_count;
3661 
3662  /* Reset the average MB variance */
3663  s->me.mb_var_sum_temp =
3664  s->me.mc_mb_var_sum_temp = 0;
3665 
3666  /* we need to initialize some time vars before we can encode B-frames */
3667  // RAL: Condition added for MPEG1VIDEO
3668  if (s->out_format == FMT_MPEG1 || (s->h263_pred && s->msmpeg4_version == MSMP4_UNUSED))
3670  if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3672 
3673  s->me.scene_change_score=0;
3674 
3675 // s->lambda= s->cur_pic.ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3676 
3677  if(s->pict_type==AV_PICTURE_TYPE_I){
3678  s->no_rounding = s->msmpeg4_version >= MSMP4_V3;
3679  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3680  s->no_rounding ^= s->flipflop_rounding;
3681  }
3682 
3683  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3684  if (estimate_qp(s,1) < 0)
3685  return -1;
3687  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3688  if(s->pict_type==AV_PICTURE_TYPE_B)
3689  s->lambda= s->last_lambda_for[s->pict_type];
3690  else
3691  s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3692  update_qscale(s);
3693  }
3694 
3695  ff_me_init_pic(s);
3696 
3697  s->mb_intra=0; //for the rate distortion & bit compare functions
3698  for (int i = 0; i < context_count; i++) {
3699  MpegEncContext *const slice = s->thread_context[i];
3700  uint8_t *start, *end;
3701  int h;
3702 
3703  if (i) {
3704  ret = ff_update_duplicate_context(slice, s);
3705  if (ret < 0)
3706  return ret;
3707  }
3708  slice->me.temp = slice->me.scratchpad = slice->sc.scratchpad_buf;
3709 
3710  h = s->mb_height;
3711  start = pkt->data + (size_t)(((int64_t) pkt->size) * slice->start_mb_y / h);
3712  end = pkt->data + (size_t)(((int64_t) pkt->size) * slice-> end_mb_y / h);
3713 
3714  init_put_bits(&s->thread_context[i]->pb, start, end - start);
3715  }
3716 
3717  /* Estimate motion for every MB */
3718  if(s->pict_type != AV_PICTURE_TYPE_I){
3719  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3720  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3721  if (s->pict_type != AV_PICTURE_TYPE_B) {
3722  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3723  s->me_pre == 2) {
3724  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3725  }
3726  }
3727 
3728  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3729  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3730  /* I-Frame */
3731  for(i=0; i<s->mb_stride*s->mb_height; i++)
3732  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3733 
3734  if(!s->fixed_qscale){
3735  /* finding spatial complexity for I-frame rate control */
3736  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3737  }
3738  }
3739  for(i=1; i<context_count; i++){
3740  merge_context_after_me(s, s->thread_context[i]);
3741  }
3742  s->mc_mb_var_sum = s->me.mc_mb_var_sum_temp;
3743  s->mb_var_sum = s->me. mb_var_sum_temp;
3744  emms_c();
3745 
3746  if (s->me.scene_change_score > s->scenechange_threshold &&
3747  s->pict_type == AV_PICTURE_TYPE_P) {
3748  s->pict_type= AV_PICTURE_TYPE_I;
3749  for(i=0; i<s->mb_stride*s->mb_height; i++)
3750  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3751  if (s->msmpeg4_version >= MSMP4_V3)
3752  s->no_rounding=1;
3753  ff_dlog(s->avctx, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3754  s->mb_var_sum, s->mc_mb_var_sum);
3755  }
3756 
3757  if(!s->umvplus){
3758  if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3759  s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3760 
3761  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3762  int a,b;
3763  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3764  b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3765  s->f_code= FFMAX3(s->f_code, a, b);
3766  }
3767 
3769  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3770  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3771  int j;
3772  for(i=0; i<2; i++){
3773  for(j=0; j<2; j++)
3774  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3775  s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3776  }
3777  }
3778  } else if (s->pict_type == AV_PICTURE_TYPE_B) {
3779  int a, b;
3780 
3781  a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3782  b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3783  s->f_code = FFMAX(a, b);
3784 
3785  a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3786  b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3787  s->b_code = FFMAX(a, b);
3788 
3789  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3790  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3791  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3792  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3793  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3794  int dir, j;
3795  for(dir=0; dir<2; dir++){
3796  for(i=0; i<2; i++){
3797  for(j=0; j<2; j++){
3800  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3801  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3802  }
3803  }
3804  }
3805  }
3806  }
3807  }
3808 
3809  if (estimate_qp(s, 0) < 0)
3810  return -1;
3811 
3812  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3813  s->pict_type == AV_PICTURE_TYPE_I &&
3814  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3815  s->qscale= 3; //reduce clipping problems
3816 
3817  if (s->out_format == FMT_MJPEG) {
3818  ret = ff_check_codec_matrices(s->avctx, FF_MATRIX_TYPE_INTRA | FF_MATRIX_TYPE_CHROMA_INTRA, (7 + s->qscale) / s->qscale, 65535);
3819  if (ret < 0)
3820  return ret;
3821 
3822  if (s->codec_id != AV_CODEC_ID_AMV) {
3823  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3824  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3825 
3826  if (s->avctx->intra_matrix) {
3827  chroma_matrix =
3828  luma_matrix = s->avctx->intra_matrix;
3829  }
3830  if (s->avctx->chroma_intra_matrix)
3831  chroma_matrix = s->avctx->chroma_intra_matrix;
3832 
3833  /* for mjpeg, we do include qscale in the matrix */
3834  for (int i = 1; i < 64; i++) {
3835  int j = s->idsp.idct_permutation[i];
3836 
3837  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3838  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3839  }
3840  s->y_dc_scale_table =
3841  s->c_dc_scale_table = ff_mpeg12_dc_scale_table[s->intra_dc_precision];
3842  s->chroma_intra_matrix[0] =
3843  s->intra_matrix[0] = ff_mpeg12_dc_scale_table[s->intra_dc_precision][8];
3844  } else {
3845  static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3846  static const uint8_t c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3847  for (int i = 1; i < 64; i++) {
3848  int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
3849 
3850  s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3851  s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3852  }
3853  s->y_dc_scale_table = y;
3854  s->c_dc_scale_table = c;
3855  s->intra_matrix[0] = 13;
3856  s->chroma_intra_matrix[0] = 14;
3857  }
3858  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3859  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3860  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3861  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3862  s->qscale = 8;
3863  }
3864 
3865  if (s->pict_type == AV_PICTURE_TYPE_I) {
3866  s->cur_pic.ptr->f->flags |= AV_FRAME_FLAG_KEY;
3867  } else {
3868  s->cur_pic.ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
3869  }
3870  s->cur_pic.ptr->f->pict_type = s->pict_type;
3871 
3872  if (s->cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
3873  s->picture_in_gop_number=0;
3874 
3875  s->mb_x = s->mb_y = 0;
3876  s->last_bits= put_bits_count(&s->pb);
3877  switch(s->out_format) {
3878 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
3879  case FMT_MJPEG:
3881  break;
3882 #endif
3883  case FMT_SPEEDHQ:
3884  if (CONFIG_SPEEDHQ_ENCODER)
3886  break;
3887  case FMT_H261:
3888  if (CONFIG_H261_ENCODER)
3890  break;
3891  case FMT_H263:
3892  if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3894 #if CONFIG_MSMPEG4ENC
3895  else if (s->msmpeg4_version != MSMP4_UNUSED)
3897 #endif
3898  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3900  if (ret < 0)
3901  return ret;
3902  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3904  if (ret < 0)
3905  return ret;
3906  }
3907  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3909  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3911  else if (CONFIG_H263_ENCODER)
3913  break;
3914  case FMT_MPEG1:
3915  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3917  break;
3918  default:
3919  av_assert0(0);
3920  }
3921  bits= put_bits_count(&s->pb);
3922  s->header_bits= bits - s->last_bits;
3923 
3924  for(i=1; i<context_count; i++){
3925  update_duplicate_context_after_me(s->thread_context[i], s);
3926  }
3927  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3928  for(i=1; i<context_count; i++){
3929  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3930  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3931  merge_context_after_encode(s, s->thread_context[i]);
3932  }
3933  emms_c();
3934  return 0;
3935 }
3936 
3937 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3938  const int intra= s->mb_intra;
3939  int i;
3940 
3941  s->dct_count[intra]++;
3942 
3943  for(i=0; i<64; i++){
3944  int level= block[i];
3945 
3946  if(level){
3947  if(level>0){
3948  s->dct_error_sum[intra][i] += level;
3949  level -= s->dct_offset[intra][i];
3950  if(level<0) level=0;
3951  }else{
3952  s->dct_error_sum[intra][i] -= level;
3953  level += s->dct_offset[intra][i];
3954  if(level>0) level=0;
3955  }
3956  block[i]= level;
3957  }
3958  }
3959 }
3960 
3962  int16_t *block, int n,
3963  int qscale, int *overflow){
3964  const int *qmat;
3965  const uint16_t *matrix;
3966  const uint8_t *scantable;
3967  const uint8_t *perm_scantable;
3968  int max=0;
3969  unsigned int threshold1, threshold2;
3970  int bias=0;
3971  int run_tab[65];
3972  int level_tab[65];
3973  int score_tab[65];
3974  int survivor[65];
3975  int survivor_count;
3976  int last_run=0;
3977  int last_level=0;
3978  int last_score= 0;
3979  int last_i;
3980  int coeff[2][64];
3981  int coeff_count[64];
3982  int qmul, qadd, start_i, last_non_zero, i, dc;
3983  const int esc_length= s->ac_esc_length;
3984  const uint8_t *length, *last_length;
3985  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3986  int mpeg2_qscale;
3987 
3988  s->fdsp.fdct(block);
3989 
3990  if(s->dct_error_sum)
3991  s->denoise_dct(s, block);
3992  qmul= qscale*16;
3993  qadd= ((qscale-1)|1)*8;
3994 
3995  if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3996  else mpeg2_qscale = qscale << 1;
3997 
3998  if (s->mb_intra) {
3999  int q;
4000  scantable= s->intra_scantable.scantable;
4001  perm_scantable= s->intra_scantable.permutated;
4002  if (!s->h263_aic) {
4003  if (n < 4)
4004  q = s->y_dc_scale;
4005  else
4006  q = s->c_dc_scale;
4007  q = q << 3;
4008  } else{
4009  /* For AIC we skip quant/dequant of INTRADC */
4010  q = 1 << 3;
4011  qadd=0;
4012  }
4013 
4014  /* note: block[0] is assumed to be positive */
4015  block[0] = (block[0] + (q >> 1)) / q;
4016  start_i = 1;
4017  last_non_zero = 0;
4018  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4019  matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4020  if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4021  bias= 1<<(QMAT_SHIFT-1);
4022 
4023  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4024  length = s->intra_chroma_ac_vlc_length;
4025  last_length= s->intra_chroma_ac_vlc_last_length;
4026  } else {
4027  length = s->intra_ac_vlc_length;
4028  last_length= s->intra_ac_vlc_last_length;
4029  }
4030  } else {
4031  scantable= s->inter_scantable.scantable;
4032  perm_scantable= s->inter_scantable.permutated;
4033  start_i = 0;
4034  last_non_zero = -1;
4035  qmat = s->q_inter_matrix[qscale];
4036  matrix = s->inter_matrix;
4037  length = s->inter_ac_vlc_length;
4038  last_length= s->inter_ac_vlc_last_length;
4039  }
4040  last_i= start_i;
4041 
4042  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4043  threshold2= (threshold1<<1);
4044 
4045  for(i=63; i>=start_i; i--) {
4046  const int j = scantable[i];
4047  int64_t level = (int64_t)block[j] * qmat[j];
4048 
4049  if(((uint64_t)(level+threshold1))>threshold2){
4050  last_non_zero = i;
4051  break;
4052  }
4053  }
4054 
4055  for(i=start_i; i<=last_non_zero; i++) {
4056  const int j = scantable[i];
4057  int64_t level = (int64_t)block[j] * qmat[j];
4058 
4059 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4060 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4061  if(((uint64_t)(level+threshold1))>threshold2){
4062  if(level>0){
4063  level= (bias + level)>>QMAT_SHIFT;
4064  coeff[0][i]= level;
4065  coeff[1][i]= level-1;
4066 // coeff[2][k]= level-2;
4067  }else{
4068  level= (bias - level)>>QMAT_SHIFT;
4069  coeff[0][i]= -level;
4070  coeff[1][i]= -level+1;
4071 // coeff[2][k]= -level+2;
4072  }
4073  coeff_count[i]= FFMIN(level, 2);
4074  av_assert2(coeff_count[i]);
4075  max |=level;
4076  }else{
4077  coeff[0][i]= (level>>31)|1;
4078  coeff_count[i]= 1;
4079  }
4080  }
4081 
4082  *overflow= s->max_qcoeff < max; //overflow might have happened
4083 
4084  if(last_non_zero < start_i){
4085  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4086  return last_non_zero;
4087  }
4088 
4089  score_tab[start_i]= 0;
4090  survivor[0]= start_i;
4091  survivor_count= 1;
4092 
4093  for(i=start_i; i<=last_non_zero; i++){
4094  int level_index, j, zero_distortion;
4095  int dct_coeff= FFABS(block[ scantable[i] ]);
4096  int best_score=256*256*256*120;
4097 
4098  if (s->fdsp.fdct == ff_fdct_ifast)
4099  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4100  zero_distortion= dct_coeff*dct_coeff;
4101 
4102  for(level_index=0; level_index < coeff_count[i]; level_index++){
4103  int distortion;
4104  int level= coeff[level_index][i];
4105  const int alevel= FFABS(level);
4106  int unquant_coeff;
4107 
4108  av_assert2(level);
4109 
4110  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4111  unquant_coeff= alevel*qmul + qadd;
4112  } else if(s->out_format == FMT_MJPEG) {
4113  j = s->idsp.idct_permutation[scantable[i]];
4114  unquant_coeff = alevel * matrix[j] * 8;
4115  }else{ // MPEG-1
4116  j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4117  if(s->mb_intra){
4118  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4119  unquant_coeff = (unquant_coeff - 1) | 1;
4120  }else{
4121  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4122  unquant_coeff = (unquant_coeff - 1) | 1;
4123  }
4124  unquant_coeff<<= 3;
4125  }
4126 
4127  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4128  level+=64;
4129  if((level&(~127)) == 0){
4130  for(j=survivor_count-1; j>=0; j--){
4131  int run= i - survivor[j];
4132  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4133  score += score_tab[i-run];
4134 
4135  if(score < best_score){
4136  best_score= score;
4137  run_tab[i+1]= run;
4138  level_tab[i+1]= level-64;
4139  }
4140  }
4141 
4142  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4143  for(j=survivor_count-1; j>=0; j--){
4144  int run= i - survivor[j];
4145  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4146  score += score_tab[i-run];
4147  if(score < last_score){
4148  last_score= score;
4149  last_run= run;
4150  last_level= level-64;
4151  last_i= i+1;
4152  }
4153  }
4154  }
4155  }else{
4156  distortion += esc_length*lambda;
4157  for(j=survivor_count-1; j>=0; j--){
4158  int run= i - survivor[j];
4159  int score= distortion + score_tab[i-run];
4160 
4161  if(score < best_score){
4162  best_score= score;
4163  run_tab[i+1]= run;
4164  level_tab[i+1]= level-64;
4165  }
4166  }
4167 
4168  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4169  for(j=survivor_count-1; j>=0; j--){
4170  int run= i - survivor[j];
4171  int score= distortion + score_tab[i-run];
4172  if(score < last_score){
4173  last_score= score;
4174  last_run= run;
4175  last_level= level-64;
4176  last_i= i+1;
4177  }
4178  }
4179  }
4180  }
4181  }
4182 
4183  score_tab[i+1]= best_score;
4184 
4185  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4186  if(last_non_zero <= 27){
4187  for(; survivor_count; survivor_count--){
4188  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4189  break;
4190  }
4191  }else{
4192  for(; survivor_count; survivor_count--){
4193  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4194  break;
4195  }
4196  }
4197 
4198  survivor[ survivor_count++ ]= i+1;
4199  }
4200 
4201  if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4202  last_score= 256*256*256*120;
4203  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4204  int score= score_tab[i];
4205  if (i)
4206  score += lambda * 2; // FIXME more exact?
4207 
4208  if(score < last_score){
4209  last_score= score;
4210  last_i= i;
4211  last_level= level_tab[i];
4212  last_run= run_tab[i];
4213  }
4214  }
4215  }
4216 
4217  s->coded_score[n] = last_score;
4218 
4219  dc= FFABS(block[0]);
4220  last_non_zero= last_i - 1;
4221  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4222 
4223  if(last_non_zero < start_i)
4224  return last_non_zero;
4225 
4226  if(last_non_zero == 0 && start_i == 0){
4227  int best_level= 0;
4228  int best_score= dc * dc;
4229 
4230  for(i=0; i<coeff_count[0]; i++){
4231  int level= coeff[i][0];
4232  int alevel= FFABS(level);
4233  int unquant_coeff, score, distortion;
4234 
4235  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4236  unquant_coeff= (alevel*qmul + qadd)>>3;
4237  } else{ // MPEG-1
4238  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4239  unquant_coeff = (unquant_coeff - 1) | 1;
4240  }
4241  unquant_coeff = (unquant_coeff + 4) >> 3;
4242  unquant_coeff<<= 3 + 3;
4243 
4244  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4245  level+=64;
4246  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4247  else score= distortion + esc_length*lambda;
4248 
4249  if(score < best_score){
4250  best_score= score;
4251  best_level= level - 64;
4252  }
4253  }
4254  block[0]= best_level;
4255  s->coded_score[n] = best_score - dc*dc;
4256  if(best_level == 0) return -1;
4257  else return last_non_zero;
4258  }
4259 
4260  i= last_i;
4261  av_assert2(last_level);
4262 
4263  block[ perm_scantable[last_non_zero] ]= last_level;
4264  i -= last_run + 1;
4265 
4266  for(; i>start_i; i -= run_tab[i] + 1){
4267  block[ perm_scantable[i-1] ]= level_tab[i];
4268  }
4269 
4270  return last_non_zero;
4271 }
4272 
4273 static int16_t basis[64][64];
4274 
4275 static void build_basis(uint8_t *perm){
4276  int i, j, x, y;
4277  emms_c();
4278  for(i=0; i<8; i++){
4279  for(j=0; j<8; j++){
4280  for(y=0; y<8; y++){
4281  for(x=0; x<8; x++){
4282  double s= 0.25*(1<<BASIS_SHIFT);
4283  int index= 8*i + j;
4284  int perm_index= perm[index];
4285  if(i==0) s*= sqrt(0.5);
4286  if(j==0) s*= sqrt(0.5);
4287  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4288  }
4289  }
4290  }
4291  }
4292 }
4293 
/*
 * Noise-shaping refinement pass over an already-quantized 8x8 block:
 * greedily tries +-1 changes on each coefficient and keeps the change that
 * best reduces a combined rate (VLC length delta, scaled by lambda) plus
 * distortion (try_8x8basis on the running reconstruction error) score.
 * Iterates until no single-coefficient change improves the score.
 * Returns the new last non-zero coefficient index (scantable order).
 */
static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
                        int16_t *block, int16_t *weight, int16_t *orig,
                        int n, int qscale){
    int16_t rem[64];
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const uint8_t *length;
    const uint8_t *last_length;
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true

    /* basis[][] is filled lazily on first use (all-zero until then) */
    if(basis[0][0] == 0)
        build_basis(s->idsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->mb_intra) {
        scantable= s->intra_scantable.scantable;
        perm_scantable= s->intra_scantable.permutated;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
//        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        /* select per-plane AC VLC length tables (chroma may have its own) */
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable= s->inter_scantable.scantable;
        perm_scantable= s->inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->block_last_index[n];

    /* rem[] starts as the negated source (plus DC and rounding term) at
     * RECON_SHIFT precision; dequantized coefficients are added below so
     * that rem becomes the current reconstruction error */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
    }

    /* compress the visual-activity weights into the 16..63 range and
     * derive the rate/distortion tradeoff lambda from their energy */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    /* build the initial run-length table and accumulate the dequantized
     * coefficients into rem[] via the basis functions */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    /* greedy improvement loop: exits when no +-1 change helps */
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            /* forward-DCT of the weighted error; used below to veto new
             * coefficients whose sign matches the error gradient */
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        if(start_i){
            /* intra: also consider +-1 on the DC coefficient (rate-free) */
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            /* below noise-shaping level 3, only coefficients up to just past
             * the current last non-zero one are considered */
            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                /* below noise-shaping level 2, never grow a coefficient's
                 * magnitude */
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* level change only: rate delta is the VLC length
                         * difference for the same run */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=   last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* introducing a new +-1 coefficient: splits a run */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            /* skip if the gradient and the new coefficient
                             * have the same sign (would add to the error) */
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            /* new coefficient becomes the last one; previous
                             * last switches from last_length to length table */
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* removing a +-1 coefficient: merges two runs */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            /* apply the winning change, update last_non_zero, rebuild the
             * run-length table and fold the change into rem[] */
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;
        }
    }

    return last_non_zero;
}
4609 
/**
 * Permute an 8x8 block according to permutation.
 * @param block the block which will be permuted according to
 * the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to
 * speed the permutation up
 * @param scantable the used scantable, this is only used to speed the
 * permutation up, the block is not (inverse) permutated
 * to scantable order!
 */
void ff_block_permute(int16_t *block, const uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t temp[64];

    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* stash the coefficients at their pre-permutation positions and
     * clear them from the block */
    for (int i = 0; i <= last; i++) {
        const int idx = scantable[i];
        temp[idx]  = block[idx];
        block[idx] = 0;
    }

    /* scatter the stashed coefficients to their permuted positions */
    for (int i = 0; i <= last; i++) {
        const int idx = scantable[i];
        block[permutation[idx]] = temp[idx];
    }
}
4645 
/* NOTE(review): the opening signature line of this function (the
 * `int ff_dct_quantize_c(MpegEncContext *s,` line) was lost in this
 * extraction; the lines below are its parameter list and body.
 *
 * Default (non-trellis) quantizer: forward-DCTs the block, optionally
 * denoises it, quantizes with the per-codec matrix and bias, and reports
 * via *overflow whether any level exceeded s->max_qcoeff.
 * Returns the index of the last non-zero coefficient in scantable order
 * (or start_i-1 if the block is empty past the DC). */
                        int16_t *block, int n,
                        int qscale, int *overflow)
{
    int i, last_non_zero, q, start_i;
    const int *qmat;
    const uint8_t *scantable;
    int bias;
    int max=0;
    unsigned int threshold1, threshold2;

    s->fdsp.fdct(block);

    /* optional DCT-domain noise reduction */
    if(s->dct_error_sum)
        s->denoise_dct(s, block);

    if (s->mb_intra) {
        scantable= s->intra_scantable.scantable;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
            q = q << 3;
        } else
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1 << 3;

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
        last_non_zero = 0;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    } else {
        scantable= s->inter_scantable.scantable;
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    }
    /* |level*qmat| values inside (-threshold1, threshold1+...) quantize to 0;
     * the unsigned compare below tests both signs at once */
    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);
    /* scan backwards to find the last coefficient that survives
     * quantization, zeroing the trailing ones on the way */
    for(i=63;i>=start_i;i--) {
        const int j = scantable[i];
        int64_t level = (int64_t)block[j] * qmat[j];

        if(((uint64_t)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }else{
            block[j]=0;
        }
    }
    /* quantize the remaining coefficients in scan order */
    for(i=start_i; i<=last_non_zero; i++) {
        const int j = scantable[i];
        int64_t level = (int64_t)block[j] * qmat[j];

//        if(   bias+level >= (1<<QMAT_SHIFT)
//           || bias-level >= (1<<QMAT_SHIFT)){
        if(((uint64_t)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                block[j]= level;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                block[j]= -level;
            }
            max |=level;
        }else{
            block[j]=0;
        }
    }
    *overflow= s->max_qcoeff < max; //overflow might have happened

    /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
    if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
        ff_block_permute(block, s->idsp.idct_permutation,
                         scantable, last_non_zero);

    return last_non_zero;
}
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
encode_frame
static int encode_frame(AVCodecContext *c, const AVFrame *frame, AVPacket *pkt)
Definition: mpegvideo_enc.c:1433
MpegEncContext::i_count
int i_count
Definition: mpegvideo.h:344
MPV_MAX_PLANES
#define MPV_MAX_PLANES
Definition: mpegpicture.h:31
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:685
FF_MATRIX_TYPE_INTRA
#define FF_MATRIX_TYPE_INTRA
Check if the elements of codec context matrices (intra_matrix, inter_matrix or chroma_intra_matrix) a...
Definition: encode.h:103
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:84
ff_speedhq_end_slice
void ff_speedhq_end_slice(MpegEncContext *s)
Definition: speedhqenc.c:155
ff_encode_reordered_opaque
int ff_encode_reordered_opaque(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame)
Propagate user opaque values from the frame to avctx/pkt as needed.
Definition: encode.c:235
MpegEncContext::mb_skipped
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:190
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:430
encode_picture
static int encode_picture(MpegEncContext *s, const AVPacket *pkt)
Definition: mpegvideo_enc.c:3656
MpegEncContext::mb_type
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegvideoenc.h)
Definition: mpegvideo.h:289
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegvideoenc.h:49
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:264
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
h263data.h
ff_speedhq_encode_init
av_cold int ff_speedhq_encode_init(MpegEncContext *s)
Definition: speedhqenc.c:98
ff_mpv_enc_class
const AVClass ff_mpv_enc_class
Definition: mpegvideo_enc.c:104
level
uint8_t level
Definition: svq3.c:205
MpegEncContext::data_partitioning
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:387
av_clip
#define av_clip
Definition: common.h:100
set_frame_distances
static void set_frame_distances(MpegEncContext *s)
Definition: mpegvideo_enc.c:3642
get_bits_diff
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideoenc.h:160
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:541
MpegEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideo.h:201
H263_GOB_HEIGHT
#define H263_GOB_HEIGHT(h)
Definition: h263.h:28
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:226
r
const char * r
Definition: vf_curves.c:127
ff_h261_encode_init
av_cold int ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:355
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:819
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:227
ff_mpeg1_encode_mb
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
MpegEncContext::b_code
int b_code
backward MV resolution for B-frames (MPEG-4)
Definition: mpegvideo.h:228
mem_internal.h
ff_me_init
av_cold int ff_me_init(MotionEstContext *c, AVCodecContext *avctx, const MECmpContext *mecc, int mpvenc)
Definition: motion_est.c:308
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:212
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
BUF_BITS
#define BUF_BITS
Definition: put_bits.h:47
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1317
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MpegEncContext *s, int type)
Definition: motion_est.c:1651
ff_speedhq_encode_picture_header
void ff_speedhq_encode_picture_header(MpegEncContext *s)
Definition: speedhqenc.c:143
ff_wmv2_encode_picture_header
int ff_wmv2_encode_picture_header(MpegEncContext *s)
Definition: wmv2enc.c:97
thread.h
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
matrix
Definition: vc1dsp.c:43
ff_h261_encode_picture_header
void ff_h261_encode_picture_header(MpegEncContext *s)
Definition: h261enc.c:69
src1
const pixel * src1
Definition: h264pred_template.c:420
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:224
mpegvideoenc.h
int64_t
long long int64_t
Definition: coverity.c:34
sse_mb
static int sse_mb(MpegEncContext *s)
Definition: mpegvideo_enc.c:2748
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
COPY
#define COPY(a)
h263enc.h
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4273
AVCodecContext::intra_matrix
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:988
mv_bits
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:164
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2792
AVCodecContext::lumi_masking
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:848
update_noise_reduction
static void update_noise_reduction(MpegEncContext *s)
Definition: mpegvideo_enc.c:1831
out_size
int out_size
Definition: movenc.c:56
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:262
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:70
MpegEncContext::partitioned_frame
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:388
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegvideoenc.h:42
MpegEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:330
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:459
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:410
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:223
INTERLACED_DCT
#define INTERLACED_DCT(s)
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:522
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:206
w
uint8_t w
Definition: llviddspenc.c:38
qmat16
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.h:356
internal.h
ff_h261_encode_mb
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:228
AVPacket::data
uint8_t * data
Definition: packet.h:539
put_dct
static void put_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1089
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
Definition: packet.c:377
AVOption
AVOption.
Definition: opt.h:429
encode.h
b
#define b
Definition: input.c:41
put_bytes_count
static int put_bytes_count(const PutBitContext *s, int round_up)
Definition: put_bits.h:100
MpegEncContext::last_dc
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:180
data
const char data[16]
Definition: mxf.c:149
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
prepare_picture
static int prepare_picture(MpegEncContext *s, AVFrame *f, const AVFrame *props_frame)
Allocates new buffers for an AVFrame and copies the properties from another AVFrame.
Definition: mpegvideo_enc.c:1220
MpegEncContext::fdsp
FDCTDSPContext fdsp
Definition: mpegvideo.h:218
ff_mjpeg_encode_init
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:308
ff_clean_intra_table_entries
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac for the current non-intra MB.
Definition: mpegvideo.c:791
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:225
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:88
MpegEncContext::MSMP4_UNUSED
@ MSMP4_UNUSED
Definition: mpegvideo.h:416
MpegEncContext::dest
uint8_t * dest[3]
Definition: mpegvideo.h:293
speedhqenc.h
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:815
ff_msmpeg4_encode_mb
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:385
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:557
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideoenc.h:59
max
#define max(a, b)
Definition: cuda_runtime.h:33
ff_mpeg12_dc_scale_table
const uint8_t ff_mpeg12_dc_scale_table[4][32]
Definition: mpegvideodata.c:33
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:91
mathematics.h
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
ff_rate_control_init
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:497
sqr
static double sqr(double in)
Definition: af_afwtdn.c:872
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:326
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2773
get_visual_weight
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2186
MpegEncContext::MSMP4_WMV2
@ MSMP4_WMV2
Definition: mpegvideo.h:421
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:225
MpegEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideo.h:148
AVCodecContext::mb_decision
int mb_decision
macroblock decision mode
Definition: avcodec.h:976
wmv2enc.h
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:64
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1281
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:351
mpegutils.h
ff_flv_encode_picture_header
void ff_flv_encode_picture_header(MpegEncContext *s)
Definition: flvenc.c:26
MpegEncContext::MSMP4_V3
@ MSMP4_V3
Definition: mpegvideo.h:419
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:228
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:615
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:594
MpegEncContext::MSMP4_V2
@ MSMP4_V2
Definition: mpegvideo.h:418
AVCodecContext::mb_cmp
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:902
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:75
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
MpegEncContext::mv
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:274
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegvideoenc.h:53
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:431
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:261
MECmpContext::sum_abs_dctelem
int(* sum_abs_dctelem)(const int16_t *block)
Definition: me_cmp.h:56
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:129
mpeg4videoenc.h
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:918
MpegEncContext::encoding_error
uint64_t encoding_error[MPV_MAX_PLANES]
Definition: mpegvideo.h:254
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1867
skip_check
static int skip_check(MpegEncContext *s, const MPVPicture *p, const MPVPicture *ref)
Definition: mpegvideo_enc.c:1392
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:86
sp5x.h
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegvideo.h:66
estimate_qp
static int estimate_qp(MpegEncContext *s, int dry_run)
Definition: mpegvideo_enc.c:3606
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:53
FDCTDSPContext
Definition: fdctdsp.h:28
faandct.h
Floating point AAN DCT.
MpegEncContext::pict_type
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:206
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: packet.c:198
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:63
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:829
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:460
mpeg12enc.h
merge_context_after_me
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3573
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:179
frame_start
static void frame_start(MpegEncContext *s)
Definition: mpegvideo_enc.c:1852
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:46
MpegEncContext::chroma_y_shift
int chroma_y_shift
Definition: mpegvideo.h:459
fail
#define fail()
Definition: checkasm.h:193
ff_msmpeg4_encode_init
av_cold void ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:138
FMT_SPEEDHQ
@ FMT_SPEEDHQ
Definition: mpegvideo.h:67
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:129
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:996
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:508
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegvideoenc.h:44
perm
perm
Definition: f_perms.c:75
weight
const h264_weight_func weight
Definition: h264dsp_init.c:33
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1249
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:334
pts
static int64_t pts
Definition: transcode_aac.c:644
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:62
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:67
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:318
ff_sqrt
#define ff_sqrt
Definition: mathops.h:216
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
mpv_encode_init_static
static void mpv_encode_init_static(void)
Definition: mpegvideo_enc.c:271
MpegEncContext::frame_pred_frame_dct
int frame_pred_frame_dct
Definition: mpegvideo.h:445
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:135
refstruct.h
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:330
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegvideoenc.h:46
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2903
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegvideoenc.h:51
ff_mjpeg_encode_mb
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:537
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:58
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:907
MpegEncContext::i_tex_bits
int i_tex_bits
Definition: mpegvideo.h:342
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:772
RateControlContext
rate control context.
Definition: ratecontrol.h:60
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
update_mb_info
static void update_mb_info(MpegEncContext *s, int startcode)
Definition: mpegvideo_enc.c:2880
av_cold
#define av_cold
Definition: attributes.h:90
MAX_MV
#define MAX_MV
Definition: motion_est.h:36
MPVPicture::shared
int shared
Definition: mpegpicture.h:87
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:661
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:96
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:129
mpv_reconstruct_mb
static void mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Performs dequantization and IDCT (if necessary)
Definition: mpegvideo_enc.c:1109
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
emms_c
#define emms_c()
Definition: emms.h:63
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4275
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:737
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:35
get_sae
static int get_sae(const uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1179
s
#define s(width, name)
Definition: cbs_vp9.c:198
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:112
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:456
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MpegEncContext::mv_dir
int mv_dir
Definition: mpegvideo.h:259
g
const char * g
Definition: vf_curves.c:128
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:874
MpegEncContext::mb_skip_run
int mb_skip_run
Definition: mpegvideo.h:287
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1487
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:130
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
init_matrices
static av_cold int init_matrices(MpegEncContext *s, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:352
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1593
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:230
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:226
limits.h
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:66
init_qscale_tab
static void init_qscale_tab(MpegEncContext *s)
init s->cur_pic.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:242
rv10enc.h
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1310
ff_block_permute
void ff_block_permute(int16_t *block, const uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4621
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1545
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:271
PutBitContext
Definition: put_bits.h:50
ff_speedhq_mb_y_order_to_mb
static int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.h:48
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegvideoenc.h:47
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:461
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:53
AVCodecContext::p_masking
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:869
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2817
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:65
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
me_cmp_init
static av_cold int me_cmp_init(MpegEncContext *s, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:310
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1295
MECmpContext
Definition: me_cmp.h:55
MpegEncContext::pb2
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:391
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
ff_write_pass1_stats
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:38
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegvideoenc.h:52
ff_mpeg4_encode_mb
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: mpeg4videoenc.c:491
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:75
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:128
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:633
write_mb_info
static void write_mb_info(MpegEncContext *s)
Definition: mpegvideo_enc.c:2860
run
uint8_t run
Definition: svq3.c:204
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:286
MpegEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideo.h:328
MpegEncContext::f_code
int f_code
forward MV resolution
Definition: mpegvideo.h:227
bias
static int bias(int x, int c)
Definition: vqcdec.c:115
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:307
me
#define me
Definition: vf_colormatrix.c:102
aandcttab.h
ff_mpv_common_defaults
av_cold void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:483
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegvideoenc.h:48
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:486
MECmpContext::sad
me_cmp_func sad[6]
Definition: me_cmp.h:58
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:501
MPVPicture::display_picture_number
int display_picture_number
Definition: mpegpicture.h:89
EDGE_WIDTH
#define EDGE_WIDTH
Definition: diracdec.c:47
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:58
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:115
MpegEncContext::inter_matrix
uint16_t inter_matrix[64]
Definition: mpegvideo.h:300
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:239
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo_enc.c:255
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:848
AV_CODEC_ID_SPEEDHQ
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:279
ff_dct_encode_init
av_cold void ff_dct_encode_init(MpegEncContext *s)
Definition: mpegvideo_enc.c:295
mathops.h
dct_quantize_c
static int dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4646
MpegEncContext::mv_bits
int mv_bits
Definition: mpegvideo.h:340
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:347
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3572
AVCodecContext::ildct_cmp
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:908
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
ff_vbv_update
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:720
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:1040
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:977
qpeldsp.h
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:143
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1345
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:69
mpeg12codecs.h
MpegEncContext::me
MotionEstContext me
Definition: mpegvideo.h:280
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1321
ff_mjpeg_amv_encode_picture_header
void ff_mjpeg_amv_encode_picture_header(MpegEncContext *s)
Definition: mjpegenc.c:107
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
AVOnce
#define AVOnce
Definition: thread.h:202
ff_mpeg4_encode_picture_header
int ff_mpeg4_encode_picture_header(MpegEncContext *s)
Definition: mpeg4videoenc.c:1053
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
ff_mpeg1_encode_init
void ff_mpeg1_encode_init(MpegEncContext *s)
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
ff_clean_h263_qscales
void ff_clean_h263_qscales(MpegEncContext *s)
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:265
AVCodecContext::temporal_cplx_masking
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:855
mpv_encode_defaults
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:281
denoise_dct_c
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
Definition: mpegvideo_enc.c:3937
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:426
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1334
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
MpegEncContext::MSMP4_WMV1
@ MSMP4_WMV1
Definition: mpegvideo.h:420
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:558
ff_encode_alloc_frame
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
Definition: encode.c:847
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1420
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:131
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1352
encode_mb_internal
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count, int chroma_x_shift, int chroma_y_shift, int chroma_format)
Definition: mpegvideo_enc.c:2210
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:236
f
f
Definition: af_crystalizer.c:122
merge_context_after_encode
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3579
MpegEncContext::mb_intra
int mb_intra
Definition: mpegvideo.h:288
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:82
MotionEstContext::temp
uint8_t * temp
Definition: motion_est.h:55
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:322
AVPacket::size
int size
Definition: packet.h:540
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1045
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:199
load_input_picture
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1252
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:57
height
#define height
Definition: dsp.h:85
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:308
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
Definition: mpegvideo_enc.c:2096
MECmpContext::sse
me_cmp_func sse[6]
Definition: me_cmp.h:59
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:83
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:298
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:590
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1061
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:128
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:28
MpegEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideo.h:202
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegvideoenc.h:56
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:334
MpegEncContext::dct_count
int dct_count[2]
Definition: mpegvideo.h:329
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:280
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegvideoenc.h:41
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
mpv_generic_options
static const AVOption mpv_generic_options[]
Definition: mpegvideo_enc.c:98
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:95
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideoenc.h:61
MpegEncContext::interlaced_dct
int interlaced_dct
Definition: mpegvideo.h:463
AVCPBProperties::min_bitrate
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: defs.h:281
MECmpContext::nsse
me_cmp_func nsse[6]
Definition: me_cmp.h:67
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
ff_set_cmp
av_cold int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp, int type, int mpvenc)
Fill the function pointer array cmp[6] with me_cmp_funcs from c based upon type.
Definition: me_cmp.c:478
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:538
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:314
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: defs.h:60
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:345
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
Definition: mpegvideo_enc.c:351
MpegEncContext::mv_type
int mv_type
Definition: mpegvideo.h:263
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:114
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
ff_me_init_pic
void ff_me_init_pic(MpegEncContext *s)
Definition: motion_est.c:370
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:267
ff_h263_encode_mb
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:545
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:64
AVCPBProperties::avg_bitrate
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: defs.h:286
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:96
ScratchpadContext::scratchpad_buf
uint8_t * scratchpad_buf
the other *_scratchpad point into this buffer
Definition: mpegpicture.h:38
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:37
M_PI
#define M_PI
Definition: mathematics.h:67
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegvideoenc.h:54
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:220
ff_mpvenc_dct_init_mips
av_cold void ff_mpvenc_dct_init_mips(MpegEncContext *s)
Definition: mpegvideoenc_init_mips.c:26
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegvideoenc.h:43
AVCodec::id
enum AVCodecID id
Definition: codec.h:201
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
update_qscale
static void update_qscale(MpegEncContext *s)
Definition: mpegvideo_enc.c:198
ff_mjpeg_add_icc_profile_size
int ff_mjpeg_add_icc_profile_size(AVCodecContext *avctx, const AVFrame *frame, size_t *max_pkt_size)
Definition: mjpegenc_common.c:137
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:457
ff_msmpeg4_encode_picture_header
void ff_msmpeg4_encode_picture_header(MpegEncContext *s)
Definition: msmpeg4enc.c:218
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: defs.h:301
emms.h
MpegEncContext::block_last_index
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:78
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
ff_speedhq_encode_mb
void ff_speedhq_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: speedhqenc.c:252
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:67
MpegEncContext::last_mv
int last_mv[2][2][2]
last MV, used for MV prediction in MPEG-1 & B-frame MPEG-4
Definition: mpegvideo.h:276
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:130
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:142
MpegEncContext::progressive_frame
int progressive_frame
Definition: mpegvideo.h:461
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:90
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:57
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:455
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:532
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:80
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3961
get_intra_count
static int get_intra_count(MpegEncContext *s, const uint8_t *src, const uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1193
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2937
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:25
internal.h
ff_square_tab
const uint32_t ff_square_tab[512]
Definition: me_cmp.c:37
FF_MATRIX_TYPE_CHROMA_INTRA
#define FF_MATRIX_TYPE_CHROMA_INTRA
Definition: encode.h:105
estimate_best_b_count
static int estimate_best_b_count(MpegEncContext *s)
Definition: mpegvideo_enc.c:1454
AVCodecContext::intra_dc_precision
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:1011
MpegEncContext::esc3_level_length
int esc3_level_length
Definition: mpegvideo.h:425
src2
const pixel * src2
Definition: h264pred_template.c:421
MpegEncContext::tex_pb
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:390
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:276
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:286
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:49
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:119
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
ff_h263_encode_init
void ff_h263_encode_init(MpegEncContext *s)
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:557
MpegEncContext::sc
ScratchpadContext sc
Definition: mpegvideo.h:197
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:919
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:530
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
copy_context_before_encode
static void copy_context_before_encode(MpegEncContext *d, const MpegEncContext *s)
Definition: mpegvideo_enc.c:2611
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
AVCodecContext::height
int height
Definition: avcodec.h:632
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:508
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:671
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:63
idctdsp.h
avcodec.h
stride
#define stride
Definition: h264pred_template.c:536
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
vshift
static int vshift(enum AVPixelFormat fmt, int plane)
Definition: graph.c:99
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:352
ret
ret
Definition: filter_design.txt:187
me_cmp_func
int(* me_cmp_func)(struct MpegEncContext *c, const uint8_t *blk1, const uint8_t *blk2, ptrdiff_t stride, int h)
Definition: me_cmp.h:50
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:80
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:292
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1397
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:207
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
encode_mb_hq
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2676
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:94
ff_mpeg1_encode_picture_header
void ff_mpeg1_encode_picture_header(MpegEncContext *s)
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
MotionEstContext::scratchpad
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
Definition: motion_est.h:53
mpeg12data.h
ff_set_mpeg4_time
void ff_set_mpeg4_time(MpegEncContext *s)
Definition: mpeg4videoenc.c:882
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
ff_fix_long_mvs
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1700
MpegEncContext::block
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:472
MpegEncContext::dquant
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:205
MpegEncContext::chroma_x_shift
int chroma_x_shift
Definition: mpegvideo.h:458
AVCodecContext::dark_masking
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:876
AVCodecContext
main external API structure.
Definition: avcodec.h:451
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:71
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:886
h261enc.h
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Definition: mpeg4videoenc.c:1360
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:392
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: packet.c:232
mpeg4video.h
MpegEncContext::last_bits
int last_bits
temp var used for calculating the above vars
Definition: mpegvideo.h:346
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1274
encode_mb
static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2601
AVRational::den
int den
Denominator.
Definition: rational.h:60
select_input_picture
static int select_input_picture(MpegEncContext *s)
Definition: mpegvideo_enc.c:1736
set_bframe_chain_length
static int set_bframe_chain_length(MpegEncContext *s)
Determines whether an input picture is discarded or not and if not determines the length of the next ...
Definition: mpegvideo_enc.c:1606
AVCodecContext::spatial_cplx_masking
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:862
frame_end
static void frame_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:1795
add_dequant_dct
static void add_dequant_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1096
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
temp
else temp
Definition: vf_mcdeint.c:263
ff_mpv_pic_check_linesize
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
Definition: mpegpicture.c:181
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:866
flvenc.h
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:85
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:979
ff_mpv_replace_picture
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
Definition: mpegpicture.c:121
ff_h263_encode_picture_header
void ff_h263_encode_picture_header(MpegEncContext *s)
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:56
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegvideoenc.h:38
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
msmpeg4enc.h
mem.h
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:809
ff_convert_matrix
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:111
packet_internal.h
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:29
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:420
MpegEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideo.h:527
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1324
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:143
dct_quantize_refine
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4294
ff_rv10_encode_picture_header
int ff_rv10_encode_picture_header(MpegEncContext *s)
Definition: rv10enc.c:34
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
av_refstruct_pool_uninit
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:291
copy_context_after_encode
static void copy_context_after_encode(MpegEncContext *d, const MpegEncContext *s)
Definition: mpegvideo_enc.c:2638
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:260
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1061
ff_h263_update_mb
void ff_h263_update_mb(MpegEncContext *s)
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:978
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:478
AVPacket
This structure stores compressed data.
Definition: packet.h:516
mpeg4videodata.h
clip_coeffs
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
Definition: mpegvideo_enc.c:2152
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
AVCodecContext::inter_matrix
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:997
MAX_B_FRAMES
#define MAX_B_FRAMES
Definition: mpegvideo.h:51
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:253
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:632
bytestream.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:455
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
sse
static int sse(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2727
MpegEncContext::misc_bits
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:345
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:460
ff_side_data_set_encoder_stats
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: packet.c:610
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:68
ff_get_2pass_fcode
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:896
h
h
Definition: vp9dsp_template.c:2070
ff_encode_add_cpb_side_data
AVCPBProperties * ff_encode_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: encode.c:909
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:151
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:85
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideoenc.h:60
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:150
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
sp5x_qscale_five_quant_table
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:282
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
ff_mpv_alloc_pic_accessories
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture...
Definition: mpegpicture.c:237
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:73
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:62
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:234
ff_wmv2_encode_mb
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:170
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:656
MpegEncContext::p_tex_bits
int p_tex_bits
Definition: mpegvideo.h:343
ff_mpv_alloc_pic_pool
av_cold AVRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
Definition: mpegpicture.c:90
src
#define src
Definition: vp8dsp.c:248
ff_rv20_encode_picture_header
void ff_rv20_encode_picture_header(MpegEncContext *s)
Definition: rv20enc.c:37
pixblockdsp.h
ff_get_best_fcode
int ff_get_best_fcode(MpegEncContext *s, const int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1599
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:954
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:310
ff_check_codec_matrices
int ff_check_codec_matrices(AVCodecContext *avctx, unsigned types, uint16_t min, uint16_t max)
Definition: encode.c:940
MpegEncContext::chroma_format
int chroma_format
Definition: mpegvideo.h:454
FF_MATRIX_TYPE_INTER
#define FF_MATRIX_TYPE_INTER
Definition: encode.h:104
h263.h
write_slice_end
static void write_slice_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:2840
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(RateControlContext *rcc)
Definition: ratecontrol.c:709
intmath.h