00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00030 #include "libavutil/intmath.h"
00031 #include "libavutil/mathematics.h"
00032 #include "libavutil/opt.h"
00033 #include "avcodec.h"
00034 #include "dsputil.h"
00035 #include "mpegvideo.h"
00036 #include "h263.h"
00037 #include "mjpegenc.h"
00038 #include "msmpeg4.h"
00039 #include "faandct.h"
00040 #include "thread.h"
00041 #include "aandcttab.h"
00042 #include "flv.h"
00043 #include "mpeg4video.h"
00044 #include "internal.h"
00045 #include "bytestream.h"
00046 #include <limits.h>
00047 #include "sp5x.h"
00048
00049
00050
00051
00052 static int encode_picture(MpegEncContext *s, int picture_number);
00053 static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale);
00054 static int sse_mb(MpegEncContext *s);
00055 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
00056 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
00057
00058
00059
00060
00061
00062
00063 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
00064 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
00065
/* Generic AVOption table shared by all mpegvideo-based encoders; the
 * actual option entries come from the FF_MPV_COMMON_OPTS macro. */
00066 const AVOption ff_mpv_generic_options[] = {
00067 FF_MPV_COMMON_OPTS
00068 { NULL },
00069 };
00070
/**
 * Build per-qscale quantization multiplier tables from a quantization matrix.
 *
 * For every qscale in [qmin, qmax] a table of 64 fixed-point reciprocals of
 * (qscale * quant_matrix[j]) is written to qmat (and, for the generic DCT
 * path, a 16-bit MMX-friendly pair table to qmat16).  The scaling differs per
 * fdct implementation: the ifast DCT leaves AAN scale factors in its output,
 * so they are divided out here via ff_aanscales.
 *
 * @param dsp          DSP context; fdct/idct_permutation select the layout
 * @param qmat         output: 32-bit quantization multipliers per qscale
 * @param qmat16       output: 16-bit multiplier/bias pairs (generic path only)
 * @param quant_matrix input quantization matrix (64 entries)
 * @param bias         quantizer rounding bias (QUANT_BIAS_SHIFT fixed point)
 * @param qmin         first qscale to fill
 * @param qmax         last qscale to fill
 * @param intra        1 to skip the DC coefficient in the overflow scan
 */
00071 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
00072 uint16_t (*qmat16)[2][64],
00073 const uint16_t *quant_matrix,
00074 int bias, int qmin, int qmax, int intra)
00075 {
00076 int qscale;
00077 int shift = 0;
00078
00079 for (qscale = qmin; qscale <= qmax; qscale++) {
00080 int i;
00081 if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
00082 dsp->fdct == ff_jpeg_fdct_islow_10 ||
00083 dsp->fdct == ff_faandct) {
00084 for (i = 0; i < 64; i++) {
/* j: coefficient index after the IDCT permutation, so qmat is stored
 * in the scan order the quantizer will read it in. */
00085 const int j = dsp->idct_permutation[i];
00086
00087
00088
00089
00090
00091
/* Plain reciprocal: the slow/faan DCTs produce unscaled coefficients. */
00092 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
00093 (qscale * quant_matrix[j]));
00094 }
00095 } else if (dsp->fdct == ff_fdct_ifast) {
00096 for (i = 0; i < 64; i++) {
00097 const int j = dsp->idct_permutation[i];
00098
00099
00100
00101
00102
00103
/* ifast output still carries AAN scale factors (14-bit fixed point),
 * so fold 1/ff_aanscales[i] into the multiplier. */
00104 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
00105 (ff_aanscales[i] * qscale * quant_matrix[j]));
00106 }
00107 } else {
00108 for (i = 0; i < 64; i++) {
00109 const int j = dsp->idct_permutation[i];
00110
00111
00112
00113
00114
00115 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
00116 (qscale * quant_matrix[j]));
00117
00118
/* 16-bit variant for the SIMD quantizer: multiplier in [0], bias in [1]. */
00119 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
00120 (qscale * quant_matrix[j]);
00121
/* Clamp so the multiplier fits in a signed 16-bit value and is nonzero. */
00122 if (qmat16[qscale][0][i] == 0 ||
00123 qmat16[qscale][0][i] == 128 * 256)
00124 qmat16[qscale][0][i] = 128 * 256 - 1;
00125 qmat16[qscale][1][i] =
00126 ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
00127 qmat16[qscale][0][i]);
00128 }
00129 }
00130
/* Find how many extra shift bits would be needed so that the worst-case
 * coefficient (8191, AAN-scaled for ifast) times the multiplier stays
 * within int range; a nonzero shift only triggers the warning below. */
00131 for (i = intra; i < 64; i++) {
00132 int64_t max = 8191;
00133 if (dsp->fdct == ff_fdct_ifast) {
00134 max = (8191LL * ff_aanscales[i]) >> 14;
00135 }
00136 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
00137 shift++;
00138 }
00139 }
00140 }
00141 if (shift) {
00142 av_log(NULL, AV_LOG_INFO,
00143 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
00144 QMAT_SHIFT - shift);
00145 }
00146 }
00147
00148 static inline void update_qscale(MpegEncContext *s)
00149 {
00150 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
00151 (FF_LAMBDA_SHIFT + 7);
00152 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
00153
00154 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
00155 FF_LAMBDA_SHIFT;
00156 }
00157
00158 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
00159 {
00160 int i;
00161
00162 if (matrix) {
00163 put_bits(pb, 1, 1);
00164 for (i = 0; i < 64; i++) {
00165 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
00166 }
00167 } else
00168 put_bits(pb, 1, 0);
00169 }
00170
00174 void ff_init_qscale_tab(MpegEncContext *s)
00175 {
00176 int8_t * const qscale_table = s->current_picture.f.qscale_table;
00177 int i;
00178
00179 for (i = 0; i < s->mb_num; i++) {
00180 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
00181 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
00182 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
00183 s->avctx->qmax);
00184 }
00185 }
00186
/**
 * Copy frame-level metadata (and, when me_threshold is set, motion data)
 * from the user-supplied frame into the encoder's internal frame.
 *
 * @param s   encoder context (used for geometry and logging)
 * @param dst internal destination frame
 * @param src user source frame
 */
00187 static void copy_picture_attributes(MpegEncContext *s,
00188 AVFrame *dst,
00189 AVFrame *src)
00190 {
00191 int i;
00192
00193 dst->pict_type = src->pict_type;
00194 dst->quality = src->quality;
00195 dst->coded_picture_number = src->coded_picture_number;
00196 dst->display_picture_number = src->display_picture_number;
00197
00198 dst->pts = src->pts;
00199 dst->interlaced_frame = src->interlaced_frame;
00200 dst->top_field_first = src->top_field_first;
00201
/* With me_threshold the caller is expected to supply motion vectors,
 * macroblock types and reference indices; warn about anything missing
 * but still copy what is there. */
00202 if (s->avctx->me_threshold) {
00203 if (!src->motion_val[0])
00204 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
00205 if (!src->mb_type)
00206 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
00207 if (!src->ref_index[0])
00208 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
00209 if (src->motion_subsample_log2 != dst->motion_subsample_log2)
00210 av_log(s->avctx, AV_LOG_ERROR,
00211 "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
00212 src->motion_subsample_log2, dst->motion_subsample_log2);
00213
00214 memcpy(dst->mb_type, src->mb_type,
00215 s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
00216
/* Motion vectors and reference indices exist per prediction direction. */
00217 for (i = 0; i < 2; i++) {
00218 int stride = ((16 * s->mb_width ) >>
00219 src->motion_subsample_log2) + 1;
00220 int height = ((16 * s->mb_height) >> src->motion_subsample_log2);
00221
/* Skip the copy when src and dst share the same buffer. */
00222 if (src->motion_val[i] &&
00223 src->motion_val[i] != dst->motion_val[i]) {
00224 memcpy(dst->motion_val[i], src->motion_val[i],
00225 2 * stride * height * sizeof(int16_t));
00226 }
00227 if (src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]) {
00228 memcpy(dst->ref_index[i], src->ref_index[i],
00229 s->mb_stride * 4 * s->mb_height * sizeof(int8_t));
00230 }
00231 }
00232 }
00233 }
00234
/* Copy the per-frame fields that slice-threading duplicate contexts need
 * after motion estimation has run on the main context. */
00235 static void update_duplicate_context_after_me(MpegEncContext *dst,
00236 MpegEncContext *src)
00237 {
00238 #define COPY(a) dst->a= src->a
00239 COPY(pict_type);
00240 COPY(current_picture);
00241 COPY(f_code);
00242 COPY(b_code);
00243 COPY(qscale);
00244 COPY(lambda);
00245 COPY(lambda2);
00246 COPY(picture_in_gop_number);
00247 COPY(gop_picture_number);
00248 COPY(frame_pred_frame_dct);
00249 COPY(progressive_frame);
00250 COPY(partitioned_frame);
00251 #undef COPY
00252 }
00253
/* Set encoder-side defaults on top of the common mpegvideo defaults:
 * a trivial fcode table (fcode 1 for all MVs in [-16,16)) and the shared
 * default motion-vector penalty table. */
00258 static void MPV_encode_defaults(MpegEncContext *s)
00259 {
00260 int i;
00261 ff_MPV_common_defaults(s);
00262
00263 for (i = -16; i < 16; i++) {
00264 default_fcode_tab[i + MAX_MV] = 1;
00265 }
00266 s->me.mv_penalty = default_mv_penalty;
00267 s->fcode_tab = default_fcode_tab;
00268 }
00269
/* Select the DCT quantization functions: arch-specific overrides first,
 * then C fallbacks.  fast_dct_quantize keeps the non-trellis quantizer
 * even when trellis quantization replaces dct_quantize below.
 * Always returns 0. */
00270 av_cold int ff_dct_encode_init(MpegEncContext *s) {
00271 if (ARCH_X86)
00272 ff_dct_encode_init_x86(s);
00273
00274 if (!s->dct_quantize)
00275 s->dct_quantize = ff_dct_quantize_c;
00276 if (!s->denoise_dct)
00277 s->denoise_dct = denoise_dct_c;
/* NOTE: must be saved before the trellis override below. */
00278 s->fast_dct_quantize = s->dct_quantize;
00279 if (s->avctx->trellis)
00280 s->dct_quantize = dct_quantize_trellis_c;
00281
00282 return 0;
00283 }
00284
00285
/**
 * Initialize an mpegvideo-family encoder.
 *
 * Validates the codec parameters (pixel format, resolution limits,
 * rate-control settings, feature/codec compatibility), applies
 * codec-specific configuration, initializes the common mpegvideo state,
 * the DCT quantizer, the quantization matrices and the rate controller.
 *
 * @return 0 on success, -1 on any validation or initialization failure
 */
00286 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
00287 {
00288 MpegEncContext *s = avctx->priv_data;
00289 int i;
00290 int chroma_h_shift, chroma_v_shift;
00291
00292 MPV_encode_defaults(s);
00293
/* Pixel format validation per codec.  The JPEG variants additionally
 * accept non-J YUV formats under unofficial strictness. */
00294 switch (avctx->codec_id) {
00295 case AV_CODEC_ID_MPEG2VIDEO:
00296 if (avctx->pix_fmt != PIX_FMT_YUV420P &&
00297 avctx->pix_fmt != PIX_FMT_YUV422P) {
00298 av_log(avctx, AV_LOG_ERROR,
00299 "only YUV420 and YUV422 are supported\n");
00300 return -1;
00301 }
00302 break;
00303 case AV_CODEC_ID_LJPEG:
00304 if (avctx->pix_fmt != PIX_FMT_YUVJ420P &&
00305 avctx->pix_fmt != PIX_FMT_YUVJ422P &&
00306 avctx->pix_fmt != PIX_FMT_YUVJ444P &&
00307 avctx->pix_fmt != PIX_FMT_BGR0 &&
00308 avctx->pix_fmt != PIX_FMT_BGRA &&
00309 avctx->pix_fmt != PIX_FMT_BGR24 &&
00310 ((avctx->pix_fmt != PIX_FMT_YUV420P &&
00311 avctx->pix_fmt != PIX_FMT_YUV422P &&
00312 avctx->pix_fmt != PIX_FMT_YUV444P) ||
00313 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
00314 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
00315 return -1;
00316 }
00317 break;
00318 case AV_CODEC_ID_MJPEG:
00319 case AV_CODEC_ID_AMV:
00320 if (avctx->pix_fmt != PIX_FMT_YUVJ420P &&
00321 avctx->pix_fmt != PIX_FMT_YUVJ422P &&
00322 ((avctx->pix_fmt != PIX_FMT_YUV420P &&
00323 avctx->pix_fmt != PIX_FMT_YUV422P) ||
00324 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
00325 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
00326 return -1;
00327 }
00328 break;
00329 default:
00330 if (avctx->pix_fmt != PIX_FMT_YUV420P) {
00331 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
00332 return -1;
00333 }
00334 }
00335
/* Internal chroma format derived from the pixel format. */
00336 switch (avctx->pix_fmt) {
00337 case PIX_FMT_YUVJ422P:
00338 case PIX_FMT_YUV422P:
00339 s->chroma_format = CHROMA_422;
00340 break;
00341 case PIX_FMT_YUVJ420P:
00342 case PIX_FMT_YUV420P:
00343 default:
00344 s->chroma_format = CHROMA_420;
00345 break;
00346 }
00347
/* Copy basic user parameters into the context. */
00348 s->bit_rate = avctx->bit_rate;
00349 s->width = avctx->width;
00350 s->height = avctx->height;
00351 if (avctx->gop_size > 600 &&
00352 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
00353 av_log(avctx, AV_LOG_WARNING,
00354 "keyframe interval too large!, reducing it from %d to %d\n",
00355 avctx->gop_size, 600);
00356 avctx->gop_size = 600;
00357 }
00358 s->gop_size = avctx->gop_size;
00359 s->avctx = avctx;
00360 s->flags = avctx->flags;
00361 s->flags2 = avctx->flags2;
00362 s->max_b_frames = avctx->max_b_frames;
00363 s->codec_id = avctx->codec->id;
00364 #if FF_API_MPV_GLOBAL_OPTS
00365 if (avctx->luma_elim_threshold)
00366 s->luma_elim_threshold = avctx->luma_elim_threshold;
00367 if (avctx->chroma_elim_threshold)
00368 s->chroma_elim_threshold = avctx->chroma_elim_threshold;
00369 #endif
00370 s->strict_std_compliance = avctx->strict_std_compliance;
00371 s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0;
00372 s->mpeg_quant = avctx->mpeg_quant;
00373 s->rtp_mode = !!avctx->rtp_payload_size;
00374 s->intra_dc_precision = avctx->intra_dc_precision;
00375 s->user_specified_pts = AV_NOPTS_VALUE;
00376
/* gop_size <= 1 means intra-only; the gop_size value itself is then
 * unused and reset to a sane default. */
00377 if (s->gop_size <= 1) {
00378 s->intra_only = 1;
00379 s->gop_size = 12;
00380 } else {
00381 s->intra_only = 0;
00382 }
00383
00384 s->me_method = avctx->me_method;
00385
00386
00387 s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
00388
00389 #if FF_API_MPV_GLOBAL_OPTS
00390 if (s->flags & CODEC_FLAG_QP_RD)
00391 s->mpv_flags |= FF_MPV_FLAG_QP_RD;
00392 #endif
00393
/* Adaptive quantization is enabled by any masking option, but never
 * together with a fixed qscale. */
00394 s->adaptive_quant = (s->avctx->lumi_masking ||
00395 s->avctx->dark_masking ||
00396 s->avctx->temporal_cplx_masking ||
00397 s->avctx->spatial_cplx_masking ||
00398 s->avctx->p_masking ||
00399 s->avctx->border_masking ||
00400 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
00401 !s->fixed_qscale;
00402
00403 s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
00404
/* Rate-control parameter sanity checks. */
00405 if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
00406 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
00407 if (avctx->rc_max_rate && !avctx->rc_buffer_size)
00408 return -1;
00409 }
00410
00411 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
00412 av_log(avctx, AV_LOG_INFO,
00413 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
00414 }
00415
00416 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
00417 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
00418 return -1;
00419 }
00420
00421 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
00422 av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
00423 return -1;
00424 }
00425
00426 if (avctx->rc_max_rate &&
00427 avctx->rc_max_rate == avctx->bit_rate &&
00428 avctx->rc_max_rate != avctx->rc_min_rate) {
00429 av_log(avctx, AV_LOG_INFO,
00430 "impossible bitrate constraints, this will fail\n");
00431 }
00432
00433 if (avctx->rc_buffer_size &&
00434 avctx->bit_rate * (int64_t)avctx->time_base.num >
00435 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
00436 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
00437 return -1;
00438 }
00439
00440 if (!s->fixed_qscale &&
00441 avctx->bit_rate * av_q2d(avctx->time_base) >
00442 avctx->bit_rate_tolerance) {
00443 av_log(avctx, AV_LOG_ERROR,
00444 "bitrate tolerance too small for bitrate\n");
00445 return -1;
00446 }
00447
/* vbv_delay is a 16-bit field in 90kHz units in MPEG-1/2. */
00448 if (s->avctx->rc_max_rate &&
00449 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
00450 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
00451 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
00452 90000LL * (avctx->rc_buffer_size - 1) >
00453 s->avctx->rc_max_rate * 0xFFFFLL) {
00454 av_log(avctx, AV_LOG_INFO,
00455 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
00456 "specified vbv buffer is too large for the given bitrate!\n");
00457 }
00458
/* Feature/codec compatibility checks. */
00459 if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
00460 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
00461 s->codec_id != AV_CODEC_ID_FLV1) {
00462 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
00463 return -1;
00464 }
00465
00466 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
00467 av_log(avctx, AV_LOG_ERROR,
00468 "OBMC is only supported with simple mb decision\n");
00469 return -1;
00470 }
00471
00472 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
00473 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
00474 return -1;
00475 }
00476
00477 if (s->max_b_frames &&
00478 s->codec_id != AV_CODEC_ID_MPEG4 &&
00479 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
00480 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
00481 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
00482 return -1;
00483 }
00484
/* MPEG-4/H.263 store the pixel aspect ratio in 8-bit fields. */
00485 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
00486 s->codec_id == AV_CODEC_ID_H263 ||
00487 s->codec_id == AV_CODEC_ID_H263P) &&
00488 (avctx->sample_aspect_ratio.num > 255 ||
00489 avctx->sample_aspect_ratio.den > 255)) {
00490 av_log(avctx, AV_LOG_WARNING,
00491 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
00492 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
00493 av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
00494 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
00495 }
00496
/* Per-codec resolution limits. */
00497 if ((s->codec_id == AV_CODEC_ID_H263 ||
00498 s->codec_id == AV_CODEC_ID_H263P) &&
00499 (avctx->width > 2048 ||
00500 avctx->height > 1152 )) {
00501 av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
00502 return -1;
00503 }
00504 if ((s->codec_id == AV_CODEC_ID_H263 ||
00505 s->codec_id == AV_CODEC_ID_H263P) &&
00506 ((avctx->width &3) ||
00507 (avctx->height&3) )) {
00508 av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
00509 return -1;
00510 }
00511
00512 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
00513 (avctx->width > 4095 ||
00514 avctx->height > 4095 )) {
00515 av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
00516 return -1;
00517 }
00518
00519 if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
00520 (avctx->width > 16383 ||
00521 avctx->height > 16383 )) {
00522 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
00523 return -1;
00524 }
00525
00526 if ((s->codec_id == AV_CODEC_ID_WMV1 ||
00527 s->codec_id == AV_CODEC_ID_WMV2) &&
00528 avctx->width & 1) {
00529 av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
00530 return -1;
00531 }
00532
00533 if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
00534 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
00535 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
00536 return -1;
00537 }
00538
00539
00540 if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
00541 av_log(avctx, AV_LOG_ERROR,
00542 "mpeg2 style quantization not supported by codec\n");
00543 return -1;
00544 }
00545
00546 #if FF_API_MPV_GLOBAL_OPTS
00547 if (s->flags & CODEC_FLAG_CBP_RD)
00548 s->mpv_flags |= FF_MPV_FLAG_CBP_RD;
00549 #endif
00550
00551 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
00552 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
00553 return -1;
00554 }
00555
00556 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
00557 s->avctx->mb_decision != FF_MB_DECISION_RD) {
00558 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
00559 return -1;
00560 }
00561
00562 if (s->avctx->scenechange_threshold < 1000000000 &&
00563 (s->flags & CODEC_FLAG_CLOSED_GOP)) {
00564 av_log(avctx, AV_LOG_ERROR,
00565 "closed gop with scene change detection are not supported yet, "
00566 "set threshold to 1000000000\n");
00567 return -1;
00568 }
00569
00570 if (s->flags & CODEC_FLAG_LOW_DELAY) {
00571 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
00572 av_log(avctx, AV_LOG_ERROR,
00573 "low delay forcing is only available for mpeg2\n");
00574 return -1;
00575 }
00576 if (s->max_b_frames != 0) {
00577 av_log(avctx, AV_LOG_ERROR,
00578 "b frames cannot be used with low delay\n");
00579 return -1;
00580 }
00581 }
00582
00583 if (s->q_scale_type == 1) {
00584 if (avctx->qmax > 12) {
00585 av_log(avctx, AV_LOG_ERROR,
00586 "non linear quant only supports qmax <= 12 currently\n");
00587 return -1;
00588 }
00589 }
00590
/* Threading support checks. */
00591 if (s->avctx->thread_count > 1 &&
00592 s->codec_id != AV_CODEC_ID_MPEG4 &&
00593 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
00594 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
00595 s->codec_id != AV_CODEC_ID_MJPEG &&
00596 (s->codec_id != AV_CODEC_ID_H263P)) {
00597 av_log(avctx, AV_LOG_ERROR,
00598 "multi threaded encoding not supported by codec\n");
00599 return -1;
00600 }
00601
00602 if (s->avctx->thread_count < 1) {
00603 av_log(avctx, AV_LOG_ERROR,
00604 "automatic thread number detection not supported by codec, "
00605 "patch welcome\n");
00606 return -1;
00607 }
00608
/* Slice threading needs one slice per thread. */
00609 if (s->avctx->thread_count > 1)
00610 s->rtp_mode = 1;
00611
00612 if (s->avctx->thread_count > 1 && s->codec_id == AV_CODEC_ID_H263P)
00613 s->h263_slice_structured = 1;
00614
00615 if (!avctx->time_base.den || !avctx->time_base.num) {
00616 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
00617 return -1;
00618 }
00619
/* Threshold values are later scaled by 256 and summed; bound them so
 * the arithmetic cannot overflow int. */
00620 i = (INT_MAX / 2 + 128) >> 8;
00621 if (avctx->me_threshold >= i) {
00622 av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n",
00623 i - 1);
00624 return -1;
00625 }
00626 if (avctx->mb_threshold >= i) {
00627 av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
00628 i - 1);
00629 return -1;
00630 }
00631
00632 if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
00633 av_log(avctx, AV_LOG_INFO,
00634 "notice: b_frame_strategy only affects the first pass\n");
00635 avctx->b_frame_strategy = 0;
00636 }
00637
/* Reduce the time base to lowest terms. */
00638 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
00639 if (i > 1) {
00640 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
00641 avctx->time_base.den /= i;
00642 avctx->time_base.num /= i;
00643
00644 }
00645
/* Default quantizer rounding bias: MPEG-style codecs round towards
 * larger coefficients for intra, others bias inter towards zero. */
00646 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
00647
00648 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
00649 s->inter_quant_bias = 0;
00650 } else {
00651 s->intra_quant_bias = 0;
00652
00653 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
00654 }
00655
00656 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
00657 s->intra_quant_bias = avctx->intra_quant_bias;
00658 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
00659 s->inter_quant_bias = avctx->inter_quant_bias;
00660
00661 av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
00662
00663 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
00664 &chroma_v_shift);
00665
/* MPEG-4 stores the timebase denominator in a 16-bit field. */
00666 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
00667 s->avctx->time_base.den > (1 << 16) - 1) {
00668 av_log(avctx, AV_LOG_ERROR,
00669 "timebase %d/%d not supported by MPEG 4 standard, "
00670 "the maximum admitted value for the timebase denominator "
00671 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
00672 (1 << 16) - 1);
00673 return -1;
00674 }
00675 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
00676
00677 #if FF_API_MPV_GLOBAL_OPTS
00678 if (avctx->flags2 & CODEC_FLAG2_SKIP_RD)
00679 s->mpv_flags |= FF_MPV_FLAG_SKIP_RD;
00680 if (avctx->flags2 & CODEC_FLAG2_STRICT_GOP)
00681 s->mpv_flags |= FF_MPV_FLAG_STRICT_GOP;
00682 if (avctx->quantizer_noise_shaping)
00683 s->quantizer_noise_shaping = avctx->quantizer_noise_shaping;
00684 #endif
00685
/* Codec-specific setup: output format, delay, and codec quirks. */
00686 switch (avctx->codec->id) {
00687 case AV_CODEC_ID_MPEG1VIDEO:
00688 s->out_format = FMT_MPEG1;
00689 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
00690 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
00691 break;
00692 case AV_CODEC_ID_MPEG2VIDEO:
00693 s->out_format = FMT_MPEG1;
00694 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
00695 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
00696 s->rtp_mode = 1;
00697 break;
00698 case AV_CODEC_ID_LJPEG:
00699 case AV_CODEC_ID_MJPEG:
00700 case AV_CODEC_ID_AMV:
00701 s->out_format = FMT_MJPEG;
00702 s->intra_only = 1;
/* RGB lossless JPEG uses no chroma subsampling at all. */
00703 if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
00704 (avctx->pix_fmt == PIX_FMT_BGR0
00705 || s->avctx->pix_fmt == PIX_FMT_BGRA
00706 || s->avctx->pix_fmt == PIX_FMT_BGR24)) {
00707 s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
00708 s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
00709 s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
00710 } else {
00711 s->mjpeg_vsample[0] = 2;
00712 s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
00713 s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
00714 s->mjpeg_hsample[0] = 2;
00715 s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
00716 s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
00717 }
00718 if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
00719 ff_mjpeg_encode_init(s) < 0)
00720 return -1;
00721 avctx->delay = 0;
00722 s->low_delay = 1;
00723 break;
00724 case AV_CODEC_ID_H261:
00725 if (!CONFIG_H261_ENCODER)
00726 return -1;
00727 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
00728 av_log(avctx, AV_LOG_ERROR,
00729 "The specified picture size of %dx%d is not valid for the "
00730 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
00731 s->width, s->height);
00732 return -1;
00733 }
00734 s->out_format = FMT_H261;
00735 avctx->delay = 0;
00736 s->low_delay = 1;
00737 break;
00738 case AV_CODEC_ID_H263:
00739 if (!CONFIG_H263_ENCODER)
00740 return -1;
00741 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
00742 s->width, s->height) == 8) {
00743 av_log(avctx, AV_LOG_ERROR,
00744 "The specified picture size of %dx%d is not valid for "
00745 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
00746 "352x288, 704x576, and 1408x1152. "
00747 "Try H.263+.\n", s->width, s->height);
00748 return -1;
00749 }
00750 s->out_format = FMT_H263;
00751 avctx->delay = 0;
00752 s->low_delay = 1;
00753 break;
00754 case AV_CODEC_ID_H263P:
00755 s->out_format = FMT_H263;
00756 s->h263_plus = 1;
00757
00758 s->h263_aic = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
00759 s->modified_quant = s->h263_aic;
00760 s->loop_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
00761 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
00762
00763
00764
00765 avctx->delay = 0;
00766 s->low_delay = 1;
00767 break;
00768 case AV_CODEC_ID_FLV1:
00769 s->out_format = FMT_H263;
/* FLV uses H.263 version 2 ("Sorenson H.263"). */
00770 s->h263_flv = 2;
00771 s->unrestricted_mv = 1;
00772 s->rtp_mode = 0;
00773 avctx->delay = 0;
00774 s->low_delay = 1;
00775 break;
00776 case AV_CODEC_ID_RV10:
00777 s->out_format = FMT_H263;
00778 avctx->delay = 0;
00779 s->low_delay = 1;
00780 break;
00781 case AV_CODEC_ID_RV20:
00782 s->out_format = FMT_H263;
00783 avctx->delay = 0;
00784 s->low_delay = 1;
00785 s->modified_quant = 1;
00786 s->h263_aic = 1;
00787 s->h263_plus = 1;
00788 s->loop_filter = 1;
00789 s->unrestricted_mv = 0;
00790 break;
00791 case AV_CODEC_ID_MPEG4:
00792 s->out_format = FMT_H263;
00793 s->h263_pred = 1;
00794 s->unrestricted_mv = 1;
00795 s->low_delay = s->max_b_frames ? 0 : 1;
00796 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
00797 break;
00798 case AV_CODEC_ID_MSMPEG4V2:
00799 s->out_format = FMT_H263;
00800 s->h263_pred = 1;
00801 s->unrestricted_mv = 1;
00802 s->msmpeg4_version = 2;
00803 avctx->delay = 0;
00804 s->low_delay = 1;
00805 break;
00806 case AV_CODEC_ID_MSMPEG4V3:
00807 s->out_format = FMT_H263;
00808 s->h263_pred = 1;
00809 s->unrestricted_mv = 1;
00810 s->msmpeg4_version = 3;
00811 s->flipflop_rounding = 1;
00812 avctx->delay = 0;
00813 s->low_delay = 1;
00814 break;
00815 case AV_CODEC_ID_WMV1:
00816 s->out_format = FMT_H263;
00817 s->h263_pred = 1;
00818 s->unrestricted_mv = 1;
00819 s->msmpeg4_version = 4;
00820 s->flipflop_rounding = 1;
00821 avctx->delay = 0;
00822 s->low_delay = 1;
00823 break;
00824 case AV_CODEC_ID_WMV2:
00825 s->out_format = FMT_H263;
00826 s->h263_pred = 1;
00827 s->unrestricted_mv = 1;
00828 s->msmpeg4_version = 5;
00829 s->flipflop_rounding = 1;
00830 avctx->delay = 0;
00831 s->low_delay = 1;
00832 break;
00833 default:
00834 return -1;
00835 }
00836
00837 avctx->has_b_frames = !s->low_delay;
00838
00839 s->encoding = 1;
00840
00841 s->progressive_frame =
00842 s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
00843 CODEC_FLAG_INTERLACED_ME) ||
00844 s->alternate_scan);
00845
/* Allocate/initialize the shared mpegvideo state, then the encoder
 * subsystems. */
00846
00847 if (ff_MPV_common_init(s) < 0)
00848 return -1;
00849
00850 ff_dct_encode_init(s);
00851
00852 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
00853 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
00854
00855 s->quant_precision = 5;
00856
00857 ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
00858 ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
00859
00860 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
00861 ff_h261_encode_init(s);
00862 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
00863 ff_h263_encode_init(s);
00864 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
00865 ff_msmpeg4_encode_init(s);
00866 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
00867 && s->out_format == FMT_MPEG1)
00868 ff_mpeg1_encode_init(s);
00869
/* Select default quantization matrices (user matrices override),
 * stored permuted for the chosen IDCT. */
00870
00871 for (i = 0; i < 64; i++) {
00872 int j = s->dsp.idct_permutation[i];
00873 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
00874 s->mpeg_quant) {
00875 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
00876 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
00877 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
00878 s->intra_matrix[j] =
00879 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
00880 } else {
00881
00882 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
00883 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
00884 }
00885 if (s->avctx->intra_matrix)
00886 s->intra_matrix[j] = s->avctx->intra_matrix[i];
00887 if (s->avctx->inter_matrix)
00888 s->inter_matrix[j] = s->avctx->inter_matrix[i];
00889 }
00890
/* Precompute quantizer multiplier tables; MJPEG builds its own later. */
00891
00892
00893 if (s->out_format != FMT_MJPEG) {
00894 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
00895 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
00896 31, 1);
00897 ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
00898 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
00899 31, 0);
00900 }
00901
00902 if (ff_rate_control_init(s) < 0)
00903 return -1;
00904
00905 return 0;
00906 }
00907
/* Tear down the encoder: rate control first, then the common mpegvideo
 * state, the MJPEG sub-encoder if used, and the extradata buffer.
 * Always returns 0. */
00908 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
00909 {
00910 MpegEncContext *s = avctx->priv_data;
00911
00912 ff_rate_control_uninit(s);
00913
00914 ff_MPV_common_end(s);
00915 if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
00916 s->out_format == FMT_MJPEG)
00917 ff_mjpeg_encode_close(s);
00918
00919 av_freep(&avctx->extradata);
00920
00921 return 0;
00922 }
00923
/* Sum of absolute errors of a 16x16 block against a constant reference
 * value (used as a flatness measure: how far the block deviates from its
 * mean). */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int total = 0;
    int row, col;

    for (row = 0; row < 16; row++) {
        const uint8_t *line = src + row * stride;
        for (col = 0; col < 16; col++) {
            const int diff = line[col] - ref;
            total += diff >= 0 ? diff : -diff;
        }
    }

    return total;
}
00937
/**
 * Count the 16x16 blocks for which intra coding looks cheaper than inter:
 * a block is counted when its deviation from its own mean (SAE + 500
 * margin) is below its SAD against the reference frame.
 *
 * @param s      encoder context (provides the DSP sad/pix_sum functions)
 * @param src    current frame plane
 * @param ref    reference frame plane
 * @param stride line size of both planes
 * @return number of blocks favouring intra coding
 */
00938 static int get_intra_count(MpegEncContext *s, uint8_t *src,
00939 uint8_t *ref, int stride)
00940 {
00941 int x, y, w, h;
00942 int acc = 0;
00943
/* Only full 16x16 blocks are considered. */
00944 w = s->width & ~15;
00945 h = s->height & ~15;
00946
00947 for (y = 0; y < h; y += 16) {
00948 for (x = 0; x < w; x += 16) {
00949 int offset = x + y * stride;
00950 int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
00951 16);
/* mean = (sum of the 256 pixels + rounding) / 256 */
00952 int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
00953 int sae = get_sae(src + offset, mean, stride);
00954
00955 acc += sae + 500 < sad;
00956 }
00957 }
00958 return acc;
00959 }
00960
00961
/**
 * Insert a user frame into the encoder's input picture queue.
 *
 * Assigns/validates the pts, then either references the user buffers
 * directly (when strides match and the input may be kept) or copies the
 * planes into an internally allocated picture.  Finally shifts the
 * input_picture FIFO and stores the new picture at the encoding-delay
 * position.  A NULL pic_arg only shifts the queue (flush).
 *
 * @return 0 on success, a negative error code on failure
 */
00962 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg)
00963 {
00964 AVFrame *pic = NULL;
00965 int64_t pts;
00966 int i;
00967 const int encoding_delay = s->max_b_frames ? s->max_b_frames :
00968 (s->low_delay ? 0 : 1);
00969 int direct = 1;
00970
00971 if (pic_arg) {
00972 pts = pic_arg->pts;
00973 pic_arg->display_picture_number = s->input_picture_number++;
00974
/* Validate user pts: must be strictly increasing.  When the user gives
 * no pts, guess one past the previous, or fall back to the picture
 * number. */
00975 if (pts != AV_NOPTS_VALUE) {
00976 if (s->user_specified_pts != AV_NOPTS_VALUE) {
00977 int64_t time = pts;
00978 int64_t last = s->user_specified_pts;
00979
00980 if (time <= last) {
00981 av_log(s->avctx, AV_LOG_ERROR,
00982 "Error, Invalid timestamp=%"PRId64", "
00983 "last=%"PRId64"\n", pts, s->user_specified_pts);
00984 return -1;
00985 }
00986
00987 if (!s->low_delay && pic_arg->display_picture_number == 1)
00988 s->dts_delta = time - last;
00989 }
00990 s->user_specified_pts = pts;
00991 } else {
00992 if (s->user_specified_pts != AV_NOPTS_VALUE) {
00993 s->user_specified_pts =
00994 pts = s->user_specified_pts + 1;
00995 av_log(s->avctx, AV_LOG_INFO,
00996 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
00997 pts);
00998 } else {
00999 pts = pic_arg->display_picture_number;
01000 }
01001 }
01002 }
01003
01004 if (pic_arg) {
/* Direct (zero-copy) use of the user buffers is only possible when the
 * input is preserved across the encoding delay and all strides match. */
01005 if (encoding_delay && !(s->flags & CODEC_FLAG_INPUT_PRESERVED))
01006 direct = 0;
01007 if (pic_arg->linesize[0] != s->linesize)
01008 direct = 0;
01009 if (pic_arg->linesize[1] != s->uvlinesize)
01010 direct = 0;
01011 if (pic_arg->linesize[2] != s->uvlinesize)
01012 direct = 0;
01013
01014
01015
01016
01017 if (direct) {
01018 i = ff_find_unused_picture(s, 1);
01019 if (i < 0)
01020 return i;
01021
01022 pic = &s->picture[i].f;
01023 pic->reference = 3;
01024
/* Reference the user's plane pointers instead of copying. */
01025 for (i = 0; i < 4; i++) {
01026 pic->data[i] = pic_arg->data[i];
01027 pic->linesize[i] = pic_arg->linesize[i];
01028 }
01029 if (ff_alloc_picture(s, (Picture *) pic, 1) < 0) {
01030 return -1;
01031 }
01032 } else {
01033 i = ff_find_unused_picture(s, 0);
01034 if (i < 0)
01035 return i;
01036
01037 pic = &s->picture[i].f;
01038 pic->reference = 3;
01039
01040 if (ff_alloc_picture(s, (Picture *) pic, 0) < 0) {
01041 return -1;
01042 }
01043
/* If the user buffer happens to be our own buffer shifted by
 * INPLACE_OFFSET there is nothing to copy; otherwise copy each plane. */
01044 if (pic->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
01045 pic->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
01046 pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
01047
01048 } else {
01049 int h_chroma_shift, v_chroma_shift;
01050 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift,
01051 &v_chroma_shift);
01052
01053 for (i = 0; i < 3; i++) {
01054 int src_stride = pic_arg->linesize[i];
01055 int dst_stride = i ? s->uvlinesize : s->linesize;
01056 int h_shift = i ? h_chroma_shift : 0;
01057 int v_shift = i ? v_chroma_shift : 0;
01058 int w = s->width >> h_shift;
01059 int h = s->height >> v_shift;
01060 uint8_t *src = pic_arg->data[i];
01061 uint8_t *dst = pic->data[i];
01062
/* AMV without emulated edges copies whole 16-pixel-aligned rows. */
01063 if(s->codec_id == AV_CODEC_ID_AMV && !(s->avctx->flags & CODEC_FLAG_EMU_EDGE)){
01064 h= ((s->height+15)/16*16)>>v_shift;
01065 }
01066
01067 if (!s->avctx->rc_buffer_size)
01068 dst += INPLACE_OFFSET;
01069
/* Equal strides allow a single bulk copy of the plane. */
01070 if (src_stride == dst_stride)
01071 memcpy(dst, src, src_stride * h);
01072 else {
01073 while (h--) {
01074 memcpy(dst, src, w);
01075 dst += dst_stride;
01076 src += src_stride;
01077 }
01078 }
01079 }
01080 }
01081 }
01082 copy_picture_attributes(s, pic, pic_arg);
01083 pic->pts = pts;
01084 }
01085
/* Shift the input FIFO and enqueue the new picture (NULL on flush). */
01086
01087 for (i = 1; i < MAX_PICTURE_COUNT ; i++)
01088 s->input_picture[i - 1] = s->input_picture[i];
01089
01090 s->input_picture[encoding_delay] = (Picture*) pic;
01091
01092 return 0;
01093 }
01094
01095 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
01096 {
01097 int x, y, plane;
01098 int score = 0;
01099 int64_t score64 = 0;
01100
01101 for (plane = 0; plane < 3; plane++) {
01102 const int stride = p->f.linesize[plane];
01103 const int bw = plane ? 1 : 2;
01104 for (y = 0; y < s->mb_height * bw; y++) {
01105 for (x = 0; x < s->mb_width * bw; x++) {
01106 int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0 : 16;
01107 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
01108 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
01109 int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
01110
01111 switch (s->avctx->frame_skip_exp) {
01112 case 0: score = FFMAX(score, v); break;
01113 case 1: score += FFABS(v); break;
01114 case 2: score += v * v; break;
01115 case 3: score64 += FFABS(v * v * (int64_t)v); break;
01116 case 4: score64 += v * v * (int64_t)(v * v); break;
01117 }
01118 }
01119 }
01120 }
01121
01122 if (score)
01123 score64 = score;
01124
01125 if (score64 < s->avctx->frame_skip_threshold)
01126 return 1;
01127 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
01128 return 1;
01129 return 0;
01130 }
01131
01132 static int encode_frame(AVCodecContext *c, AVFrame *frame)
01133 {
01134 AVPacket pkt = { 0 };
01135 int ret, got_output;
01136
01137 av_init_packet(&pkt);
01138 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
01139 if (ret < 0)
01140 return ret;
01141
01142 ret = pkt.size;
01143 av_free_packet(&pkt);
01144 return ret;
01145 }
01146
01147 static int estimate_best_b_count(MpegEncContext *s)
01148 {
01149 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
01150 AVCodecContext *c = avcodec_alloc_context3(NULL);
01151 AVFrame input[FF_MAX_B_FRAMES + 2];
01152 const int scale = s->avctx->brd_scale;
01153 int i, j, out_size, p_lambda, b_lambda, lambda2;
01154 int64_t best_rd = INT64_MAX;
01155 int best_b_count = -1;
01156
01157 av_assert0(scale >= 0 && scale <= 3);
01158
01159
01160
01161 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
01162
01163 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
01164 if (!b_lambda)
01165 b_lambda = p_lambda;
01166 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
01167 FF_LAMBDA_SHIFT;
01168
01169 c->width = s->width >> scale;
01170 c->height = s->height >> scale;
01171 c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
01172 CODEC_FLAG_INPUT_PRESERVED ;
01173 c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
01174 c->mb_decision = s->avctx->mb_decision;
01175 c->me_cmp = s->avctx->me_cmp;
01176 c->mb_cmp = s->avctx->mb_cmp;
01177 c->me_sub_cmp = s->avctx->me_sub_cmp;
01178 c->pix_fmt = PIX_FMT_YUV420P;
01179 c->time_base = s->avctx->time_base;
01180 c->max_b_frames = s->max_b_frames;
01181
01182 if (avcodec_open2(c, codec, NULL) < 0)
01183 return -1;
01184
01185 for (i = 0; i < s->max_b_frames + 2; i++) {
01186 int ysize = c->width * c->height;
01187 int csize = (c->width / 2) * (c->height / 2);
01188 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
01189 s->next_picture_ptr;
01190
01191 avcodec_get_frame_defaults(&input[i]);
01192 input[i].data[0] = av_malloc(ysize + 2 * csize);
01193 input[i].data[1] = input[i].data[0] + ysize;
01194 input[i].data[2] = input[i].data[1] + csize;
01195 input[i].linesize[0] = c->width;
01196 input[i].linesize[1] =
01197 input[i].linesize[2] = c->width / 2;
01198
01199 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
01200 pre_input = *pre_input_ptr;
01201
01202 if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
01203 pre_input.f.data[0] += INPLACE_OFFSET;
01204 pre_input.f.data[1] += INPLACE_OFFSET;
01205 pre_input.f.data[2] += INPLACE_OFFSET;
01206 }
01207
01208 s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
01209 pre_input.f.data[0], pre_input.f.linesize[0],
01210 c->width, c->height);
01211 s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
01212 pre_input.f.data[1], pre_input.f.linesize[1],
01213 c->width >> 1, c->height >> 1);
01214 s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
01215 pre_input.f.data[2], pre_input.f.linesize[2],
01216 c->width >> 1, c->height >> 1);
01217 }
01218 }
01219
01220 for (j = 0; j < s->max_b_frames + 1; j++) {
01221 int64_t rd = 0;
01222
01223 if (!s->input_picture[j])
01224 break;
01225
01226 c->error[0] = c->error[1] = c->error[2] = 0;
01227
01228 input[0].pict_type = AV_PICTURE_TYPE_I;
01229 input[0].quality = 1 * FF_QP2LAMBDA;
01230
01231 out_size = encode_frame(c, &input[0]);
01232
01233
01234
01235 for (i = 0; i < s->max_b_frames + 1; i++) {
01236 int is_p = i % (j + 1) == j || i == s->max_b_frames;
01237
01238 input[i + 1].pict_type = is_p ?
01239 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
01240 input[i + 1].quality = is_p ? p_lambda : b_lambda;
01241
01242 out_size = encode_frame(c, &input[i + 1]);
01243
01244 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
01245 }
01246
01247
01248 while (out_size) {
01249 out_size = encode_frame(c, NULL);
01250 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
01251 }
01252
01253 rd += c->error[0] + c->error[1] + c->error[2];
01254
01255 if (rd < best_rd) {
01256 best_rd = rd;
01257 best_b_count = j;
01258 }
01259 }
01260
01261 avcodec_close(c);
01262 av_freep(&c);
01263
01264 for (i = 0; i < s->max_b_frames + 2; i++) {
01265 av_freep(&input[i].data[0]);
01266 }
01267
01268 return best_b_count;
01269 }
01270
/**
 * Select the next picture to encode and decide its coding type.
 *
 * Shifts the reorder queue, optionally drops frames (frame skipping),
 * chooses the number of B frames according to b_frame_strategy, and sets
 * up s->new_picture / s->current_picture(_ptr) for the actual encode.
 *
 * @return 0 on success, a negative value on failure
 */
static int select_input_picture(MpegEncContext *s)
{
    int i;

    /* shift the reorder queue: slot 0 was consumed by the previous call */
    for (i = 1; i < MAX_PICTURE_COUNT; i++)
        s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
    s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;

    /* set next picture type & ordering */
    if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
        if (
            s->next_picture_ptr == NULL || s->intra_only) {
            /* no reference available (or intra-only codec): code an I frame */
            s->reordered_input_picture[0] = s->input_picture[0];
            s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
            s->reordered_input_picture[0]->f.coded_picture_number =
                s->coded_picture_number++;
        } else {
            int b_frames;

            if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
                if (s->picture_in_gop_number < s->gop_size &&
                    skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
                    /* The frame is close enough to the last reference to be
                     * dropped: detach (shared buffer) or release its data. */
                    if (s->input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED) {
                        for (i = 0; i < 4; i++)
                            s->input_picture[0]->f.data[i] = NULL;
                        s->input_picture[0]->f.type = 0;
                    } else {
                        assert(s->input_picture[0]->f.type == FF_BUFFER_TYPE_USER ||
                               s->input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL);

                        s->avctx->release_buffer(s->avctx,
                                                 &s->input_picture[0]->f);
                    }

                    emms_c();
                    /* account a zero-size frame to the rate control */
                    ff_vbv_update(s, 0);

                    goto no_output_pic;
                }
            }

            if (s->flags & CODEC_FLAG_PASS2) {
                /* second pass: take the picture types recorded in the
                 * rate-control log from the first pass */
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    int pict_num = s->input_picture[0]->f.display_picture_number + i;

                    if (pict_num >= s->rc_context.num_entries)
                        break;
                    if (!s->input_picture[i]) {
                        s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
                        break;
                    }

                    s->input_picture[i]->f.pict_type =
                        s->rc_context.entry[pict_num].new_pict_type;
                }
            }

            if (s->avctx->b_frame_strategy == 0) {
                /* strategy 0: use as many B frames as are queued */
                b_frames = s->max_b_frames;
                while (b_frames && !s->input_picture[b_frames])
                    b_frames--;
            } else if (s->avctx->b_frame_strategy == 1) {
                /* strategy 1: end the B run at the first frame with too many
                 * intra-looking macroblocks (scored by get_intra_count) */
                for (i = 1; i < s->max_b_frames + 1; i++) {
                    if (s->input_picture[i] &&
                        s->input_picture[i]->b_frame_score == 0) {
                        s->input_picture[i]->b_frame_score =
                            get_intra_count(s,
                                            s->input_picture[i    ]->f.data[0],
                                            s->input_picture[i - 1]->f.data[0],
                                            s->linesize) + 1;
                    }
                }
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    if (s->input_picture[i] == NULL ||
                        s->input_picture[i]->b_frame_score - 1 >
                            s->mb_num / s->avctx->b_sensitivity)
                        break;
                }

                b_frames = FFMAX(0, i - 1);

                /* reset the scores so the frames are re-evaluated next time */
                for (i = 0; i < b_frames + 1; i++) {
                    s->input_picture[i]->b_frame_score = 0;
                }
            } else if (s->avctx->b_frame_strategy == 2) {
                /* strategy 2: brute-force search on a downscaled encode */
                b_frames = estimate_best_b_count(s);
            } else {
                av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
                b_frames = 0;
            }

            emms_c();

            /* a user-forced non-B picture type terminates the B run early */
            for (i = b_frames - 1; i >= 0; i--) {
                int type = s->input_picture[i]->f.pict_type;
                if (type && type != AV_PICTURE_TYPE_B)
                    b_frames = i;
            }
            if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
                b_frames == s->max_b_frames) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "warning, too many b frames in a row\n");
            }

            /* force an I frame (or trim the run) at the GOP boundary */
            if (s->picture_in_gop_number + b_frames >= s->gop_size) {
                if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
                    s->gop_size > s->picture_in_gop_number) {
                    b_frames = s->gop_size - s->picture_in_gop_number - 1;
                } else {
                    if (s->flags & CODEC_FLAG_CLOSED_GOP)
                        b_frames = 0;
                    s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
                }
            }

            /* in a closed GOP no B frame may follow the leading I frame */
            if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
                s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
                b_frames--;

            /* the anchor frame goes first, then the B frames in display order */
            s->reordered_input_picture[0] = s->input_picture[b_frames];
            if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
                s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
            s->reordered_input_picture[0]->f.coded_picture_number =
                s->coded_picture_number++;
            for (i = 0; i < b_frames; i++) {
                s->reordered_input_picture[i + 1] = s->input_picture[i];
                s->reordered_input_picture[i + 1]->f.pict_type =
                    AV_PICTURE_TYPE_B;
                s->reordered_input_picture[i + 1]->f.coded_picture_number =
                    s->coded_picture_number++;
            }
        }
    }
no_output_pic:
    if (s->reordered_input_picture[0]) {
        /* non-B pictures become references */
        s->reordered_input_picture[0]->f.reference =
            s->reordered_input_picture[0]->f.pict_type !=
                AV_PICTURE_TYPE_B ? 3 : 0;

        ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);

        if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED ||
            s->avctx->rc_buffer_size) {
            /* The input is a shared buffer, or a VBV retry may need the
             * unmodified input later: encode into an internal copy. */
            Picture *pic;
            int i = ff_find_unused_picture(s, 0);
            if (i < 0)
                return i;
            pic = &s->picture[i];

            pic->f.reference = s->reordered_input_picture[0]->f.reference;
            if (ff_alloc_picture(s, pic, 0) < 0) {
                return -1;
            }

            /* release the source picture; only its attributes are kept */
            if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL)
                s->avctx->release_buffer(s->avctx,
                                         &s->reordered_input_picture[0]->f);
            for (i = 0; i < 4; i++)
                s->reordered_input_picture[0]->f.data[i] = NULL;
            s->reordered_input_picture[0]->f.type = 0;

            copy_picture_attributes(s, &pic->f,
                                    &s->reordered_input_picture[0]->f);

            s->current_picture_ptr = pic;
        } else {
            /* the input picture can be used directly */
            assert(s->reordered_input_picture[0]->f.type ==
                       FF_BUFFER_TYPE_USER ||
                   s->reordered_input_picture[0]->f.type ==
                       FF_BUFFER_TYPE_INTERNAL);

            s->current_picture_ptr = s->reordered_input_picture[0];
            for (i = 0; i < 4; i++) {
                s->new_picture.f.data[i] += INPLACE_OFFSET;
            }
        }
        ff_copy_picture(&s->current_picture, s->current_picture_ptr);

        s->picture_number = s->new_picture.f.display_picture_number;

    } else {
        /* nothing to encode on this call */
        memset(&s->new_picture, 0, sizeof(Picture));
    }
    return 0;
}
01471
/**
 * Main encode entry point for the MPEG-family encoders.
 *
 * Queues the input picture, selects the next picture to code (possibly
 * reordering for B frames), encodes it into pkt, re-encodes with a higher
 * lambda on VBV overflow, appends codec-specific stuffing, patches the
 * MPEG-1/2 vbv_delay field in CBR mode and fills in pts/dts.
 *
 * @param got_packet set to 1 if a packet was produced, 0 otherwise
 * @return 0 on success, a negative value on error
 */
int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
                          AVFrame *pic_arg, int *got_packet)
{
    MpegEncContext *s = avctx->priv_data;
    int i, stuffing_count, ret;
    int context_count = s->slice_context_count;

    s->picture_in_gop_number++;

    if (load_input_picture(s, pic_arg) < 0)
        return -1;

    if (select_input_picture(s) < 0) {
        return -1;
    }

    /* output? */
    if (s->new_picture.f.data[0]) {
        /* worst case: every MB at MAX_MB_BYTES plus headroom */
        if ((ret = ff_alloc_packet2(avctx, pkt, s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000)) < 0)
            return ret;
        if (s->mb_info) {
            s->mb_info_ptr = av_packet_new_side_data(pkt,
                                 AV_PKT_DATA_H263_MB_INFO,
                                 s->mb_width*s->mb_height*12);
            s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
        }

        /* split the packet buffer between the slice threads proportionally
         * to the macroblock rows each of them encodes */
        for (i = 0; i < context_count; i++) {
            int start_y = s->thread_context[i]->start_mb_y;
            int   end_y = s->thread_context[i]->  end_mb_y;
            int h       = s->mb_height;
            uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
            uint8_t *end   = pkt->data + (size_t)(((int64_t) pkt->size) *   end_y / h);

            init_put_bits(&s->thread_context[i]->pb, start, end - start);
        }

        s->pict_type = s->new_picture.f.pict_type;

        ff_MPV_frame_start(s, avctx);
vbv_retry:
        if (encode_picture(s, s->picture_number) < 0)
            return -1;

        /* export per-frame statistics */
        avctx->header_bits = s->header_bits;
        avctx->mv_bits     = s->mv_bits;
        avctx->misc_bits   = s->misc_bits;
        avctx->i_tex_bits  = s->i_tex_bits;
        avctx->p_tex_bits  = s->p_tex_bits;
        avctx->i_count     = s->i_count;

        avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
        avctx->skip_count  = s->skip_count;

        ff_MPV_frame_end(s);

        if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
            ff_mjpeg_encode_picture_trailer(s);

        if (avctx->rc_buffer_size) {
            RateControlContext *rcc = &s->rc_context;
            int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;

            /* frame too large for the VBV buffer: raise lambda (globally and
             * per MB when adaptive quant is on), undo the per-frame state
             * toggles and re-encode */
            if (put_bits_count(&s->pb) > max_size &&
                s->lambda < s->avctx->lmax) {
                s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
                                       (s->qscale + 1) / s->qscale);
                if (s->adaptive_quant) {
                    int i;
                    for (i = 0; i < s->mb_height * s->mb_stride; i++)
                        s->lambda_table[i] =
                            FFMAX(s->lambda_table[i] + 1,
                                  s->lambda_table[i] * (s->qscale + 1) /
                                  s->qscale);
                }
                s->mb_skipped = 0;

                /* undo the rounding toggle done for this P frame */
                if (s->pict_type == AV_PICTURE_TYPE_P) {
                    if (s->flipflop_rounding ||
                        s->codec_id == AV_CODEC_ID_H263P ||
                        s->codec_id == AV_CODEC_ID_MPEG4)
                        s->no_rounding ^= 1;
                }
                if (s->pict_type != AV_PICTURE_TYPE_B) {
                    s->time_base = s->last_time_base;
                    s->last_non_b_time = s->time - s->pp_time;
                }

                /* reset the per-thread bit writers */
                for (i = 0; i < context_count; i++) {
                    PutBitContext *pb = &s->thread_context[i]->pb;
                    init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
                }
                goto vbv_retry;
            }

            assert(s->avctx->rc_max_rate);
        }

        if (s->flags & CODEC_FLAG_PASS1)
            ff_write_pass1_stats(s);

        for (i = 0; i < 4; i++) {
            s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
            avctx->error[i] += s->current_picture_ptr->f.error[i];
        }

        if (s->flags & CODEC_FLAG_PASS1)
            assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
                   avctx->i_tex_bits + avctx->p_tex_bits ==
                       put_bits_count(&s->pb));
        flush_put_bits(&s->pb);
        s->frame_bits = put_bits_count(&s->pb);

        /* append the stuffing the rate control asked for */
        stuffing_count = ff_vbv_update(s, s->frame_bits);
        s->stuffing_bits = 8*stuffing_count;
        if (stuffing_count) {
            if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
                    stuffing_count + 50) {
                av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
                return -1;
            }

            switch (s->codec_id) {
            case AV_CODEC_ID_MPEG1VIDEO:
            case AV_CODEC_ID_MPEG2VIDEO:
                /* MPEG-1/2 stuffing: zero bytes */
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0);
                }
                break;
            case AV_CODEC_ID_MPEG4:
                /* MPEG-4 stuffing: stuffing start code then 0xFF bytes */
                put_bits(&s->pb, 16, 0);
                put_bits(&s->pb, 16, 0x1C3);
                stuffing_count -= 4;
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0xFF);
                }
                break;
            default:
                av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
            }
            flush_put_bits(&s->pb);
            s->frame_bits = put_bits_count(&s->pb);
        }

        /* CBR MPEG-1/2: patch the vbv_delay field in the already-written
         * picture header (pointed to by vbv_delay_ptr) */
        if (s->avctx->rc_max_rate &&
            s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
            s->out_format == FMT_MPEG1 &&
            90000LL * (avctx->rc_buffer_size - 1) <=
                s->avctx->rc_max_rate * 0xFFFFLL) {
            int vbv_delay, min_delay;
            double inbits  = s->avctx->rc_max_rate *
                             av_q2d(s->avctx->time_base);
            int    minbits = s->frame_bits - 8 *
                             (s->vbv_delay_ptr - s->pb.buf - 1);
            double bits    = s->rc_context.buffer_index + minbits - inbits;

            if (bits < 0)
                av_log(s->avctx, AV_LOG_ERROR,
                       "Internal error, negative bits\n");

            assert(s->repeat_first_field == 0);

            vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
            min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
                        s->avctx->rc_max_rate;

            vbv_delay = FFMAX(vbv_delay, min_delay);

            av_assert0(vbv_delay < 0xFFFF);

            s->vbv_delay_ptr[0] &= 0xF8;
            s->vbv_delay_ptr[0] |= vbv_delay >> 13;
            s->vbv_delay_ptr[1]  = vbv_delay >> 5;
            s->vbv_delay_ptr[2] &= 0x07;
            s->vbv_delay_ptr[2] |= vbv_delay << 3;
            avctx->vbv_delay     = vbv_delay * 300;
        }
        s->total_bits     += s->frame_bits;
        avctx->frame_bits  = s->frame_bits;

        /* dts of a reordered stream trails pts by one non-B frame */
        pkt->pts = s->current_picture.f.pts;
        if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
            if (!s->current_picture.f.coded_picture_number)
                pkt->dts = pkt->pts - s->dts_delta;
            else
                pkt->dts = s->reordered_pts;
            s->reordered_pts = pkt->pts;
        } else
            pkt->dts = pkt->pts;
        if (s->current_picture.f.key_frame)
            pkt->flags |= AV_PKT_FLAG_KEY;
        if (s->mb_info)
            av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
    } else {
        s->frame_bits = 0;
    }
    assert((s->frame_bits & 7) == 0);

    pkt->size   = s->frame_bits / 8;
    *got_packet = !!pkt->size;
    return 0;
}
01677
01678 static inline void dct_single_coeff_elimination(MpegEncContext *s,
01679 int n, int threshold)
01680 {
01681 static const char tab[64] = {
01682 3, 2, 2, 1, 1, 1, 1, 1,
01683 1, 1, 1, 1, 1, 1, 1, 1,
01684 1, 1, 1, 1, 1, 1, 1, 1,
01685 0, 0, 0, 0, 0, 0, 0, 0,
01686 0, 0, 0, 0, 0, 0, 0, 0,
01687 0, 0, 0, 0, 0, 0, 0, 0,
01688 0, 0, 0, 0, 0, 0, 0, 0,
01689 0, 0, 0, 0, 0, 0, 0, 0
01690 };
01691 int score = 0;
01692 int run = 0;
01693 int i;
01694 DCTELEM *block = s->block[n];
01695 const int last_index = s->block_last_index[n];
01696 int skip_dc;
01697
01698 if (threshold < 0) {
01699 skip_dc = 0;
01700 threshold = -threshold;
01701 } else
01702 skip_dc = 1;
01703
01704
01705 if (last_index <= skip_dc - 1)
01706 return;
01707
01708 for (i = 0; i <= last_index; i++) {
01709 const int j = s->intra_scantable.permutated[i];
01710 const int level = FFABS(block[j]);
01711 if (level == 1) {
01712 if (skip_dc && i == 0)
01713 continue;
01714 score += tab[run];
01715 run = 0;
01716 } else if (level > 1) {
01717 return;
01718 } else {
01719 run++;
01720 }
01721 }
01722 if (score >= threshold)
01723 return;
01724 for (i = skip_dc; i <= last_index; i++) {
01725 const int j = s->intra_scantable.permutated[i];
01726 block[j] = 0;
01727 }
01728 if (block[0])
01729 s->block_last_index[n] = 0;
01730 else
01731 s->block_last_index[n] = -1;
01732 }
01733
01734 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block,
01735 int last_index)
01736 {
01737 int i;
01738 const int maxlevel = s->max_qcoeff;
01739 const int minlevel = s->min_qcoeff;
01740 int overflow = 0;
01741
01742 if (s->mb_intra) {
01743 i = 1;
01744 } else
01745 i = 0;
01746
01747 for (; i <= last_index; i++) {
01748 const int j = s->intra_scantable.permutated[i];
01749 int level = block[j];
01750
01751 if (level > maxlevel) {
01752 level = maxlevel;
01753 overflow++;
01754 } else if (level < minlevel) {
01755 level = minlevel;
01756 overflow++;
01757 }
01758
01759 block[j] = level;
01760 }
01761
01762 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
01763 av_log(s->avctx, AV_LOG_INFO,
01764 "warning, clipping %d dct coefficients to %d..%d\n",
01765 overflow, minlevel, maxlevel);
01766 }
01767
/**
 * Compute a per-pixel visual weight for an 8x8 block from the variance of
 * each pixel's 3x3 neighbourhood (clipped at the block border): flat areas
 * get large weights, textured areas small ones.
 *
 * @param weight output array of 64 weights
 * @param ptr    top-left pixel of the 8x8 source block
 * @param stride line size of the source
 */
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
{
    int x, y;

    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            const int ys = FFMAX(y - 1, 0), ye = FFMIN(8, y + 2);
            const int xs = FFMAX(x - 1, 0), xe = FFMIN(8, x + 2);
            int sum = 0, sqr = 0, count = 0;
            int x2, y2;

            for (y2 = ys; y2 < ye; y2++) {
                for (x2 = xs; x2 < xe; x2++) {
                    const int v = ptr[x2 + y2 * stride];

                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }
            /* 36 * sqrt(count * E[v^2] - E[v]^2 scaled) / count */
            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
01791
/**
 * Encode one macroblock: fetch the source pixels (intra) or compute the
 * motion-compensated residual (inter), forward-DCT and quantize the 8x8
 * blocks, run the optional noise shaping / coefficient elimination, and
 * emit the bitstream through the codec-specific encode_mb function.
 *
 * @param motion_x        horizontal motion vector component
 * @param motion_y        vertical motion vector component
 * @param mb_block_height chroma block height in pixels (8 for 4:2:0,
 *                        16 for 4:2:2)
 * @param mb_block_count  number of 8x8 blocks per macroblock (6 or 8)
 */
static av_always_inline void encode_mb_internal(MpegEncContext *s,
                                                int motion_x, int motion_y,
                                                int mb_block_height,
                                                int mb_block_count)
{
    int16_t weight[8][64];
    DCTELEM orig[8][64];
    const int mb_x = s->mb_x;
    const int mb_y = s->mb_y;
    int i;
    int skip_dct[8];
    int dct_offset = s->linesize * 8;   /* offset to the second luma row pair */
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int wrap_y, wrap_c;

    for (i = 0; i < mb_block_count; i++)
        skip_dct[i] = s->skipdct;

    if (s->adaptive_quant) {
        const int last_qp = s->qscale;
        const int mb_xy = mb_x + mb_y * s->mb_stride;

        /* pick up the per-MB lambda / qscale chosen by the rate control */
        s->lambda = s->lambda_table[mb_xy];
        update_qscale(s);

        if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
            s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
            s->dquant = s->qscale - last_qp;

            if (s->out_format == FMT_H263) {
                /* H.263-family syntax limits dquant to +-2 */
                s->dquant = av_clip(s->dquant, -2, 2);

                if (s->codec_id == AV_CODEC_ID_MPEG4) {
                    if (!s->mb_intra) {
                        if (s->pict_type == AV_PICTURE_TYPE_B) {
                            /* direct-mode / odd-dquant B MBs cannot change qp */
                            if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
                                s->dquant = 0;
                        }
                        /* 8x8-MV MBs cannot change qp either */
                        if (s->mv_type == MV_TYPE_8X8)
                            s->dquant = 0;
                    }
                }
            }
        }
        ff_set_qscale(s, last_qp + s->dquant);
    } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
        ff_set_qscale(s, s->qscale + s->dquant);

    /* pointers to the source macroblock in the picture to be coded */
    wrap_y = s->linesize;
    wrap_c = s->uvlinesize;
    ptr_y  = s->new_picture.f.data[0] +
             (mb_y * 16 * wrap_y) + mb_x * 16;
    ptr_cb = s->new_picture.f.data[1] +
             (mb_y * mb_block_height * wrap_c) + mb_x * 8;
    ptr_cr = s->new_picture.f.data[2] +
             (mb_y * mb_block_height * wrap_c) + mb_x * 8;

    /* macroblocks sticking out of the picture: replicate the edge pixels
     * into the emu buffer and encode from there */
    if((mb_x*16+16 > s->width || mb_y*16+16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
        uint8_t *ebuf = s->edge_emu_buffer + 32;
        s->dsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
                                mb_y * 16, s->width, s->height);
        ptr_y = ebuf;
        s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
                                mb_block_height, mb_x * 8, mb_y * 8,
                                (s->width+1) >> 1, (s->height+1) >> 1);
        ptr_cb = ebuf + 18 * wrap_y;
        s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
                                mb_block_height, mb_x * 8, mb_y * 8,
                                (s->width+1) >> 1, (s->height+1) >> 1);
        ptr_cr = ebuf + 18 * wrap_y + 8;
    }

    if (s->mb_intra) {
        if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
            int progressive_score, interlaced_score;

            /* compare frame-DCT vs field-DCT cost and pick the cheaper one;
             * the -400 bias favours the progressive choice */
            s->interlaced_dct = 0;
            progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
                                                    NULL, wrap_y, 8) +
                                s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
                                                    NULL, wrap_y, 8) - 400;

            if (progressive_score > 0) {
                interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
                                                       NULL, wrap_y * 2, 8) +
                                   s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
                                                       NULL, wrap_y * 2, 8);
                if (progressive_score > interlaced_score) {
                    s->interlaced_dct = 1;

                    /* reinterpret the MB as two interleaved fields */
                    dct_offset = wrap_y;
                    wrap_y <<= 1;
                    if (s->chroma_format == CHROMA_422)
                        wrap_c <<= 1;
                }
            }
        }

        /* copy the source pixels into the DCT blocks */
        s->dsp.get_pixels(s->block[0], ptr_y                  , wrap_y);
        s->dsp.get_pixels(s->block[1], ptr_y              + 8 , wrap_y);
        s->dsp.get_pixels(s->block[2], ptr_y + dct_offset     , wrap_y);
        s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);

        if (s->flags & CODEC_FLAG_GRAY) {
            /* grayscale: chroma blocks are not coded */
            skip_dct[4] = 1;
            skip_dct[5] = 1;
        } else {
            s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
            s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
            if (!s->chroma_y_shift) {
                /* 4:2:2: two additional chroma blocks */
                s->dsp.get_pixels(s->block[6],
                                  ptr_cb + (dct_offset >> 1), wrap_c);
                s->dsp.get_pixels(s->block[7],
                                  ptr_cr + (dct_offset >> 1), wrap_c);
            }
        }
    } else {
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        uint8_t *dest_y, *dest_cb, *dest_cr;

        dest_y  = s->dest[0];
        dest_cb = s->dest[1];
        dest_cr = s->dest[2];

        if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
            op_pix  = s->dsp.put_pixels_tab;
            op_qpix = s->dsp.put_qpel_pixels_tab;
        } else {
            op_pix  = s->dsp.put_no_rnd_pixels_tab;
            op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
        }

        /* motion compensation: forward prediction first, then (for
         * bidirectional MBs) average in the backward prediction */
        if (s->mv_dir & MV_DIR_FORWARD) {
            ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
                          s->last_picture.f.data,
                          op_pix, op_qpix);
            op_pix  = s->dsp.avg_pixels_tab;
            op_qpix = s->dsp.avg_qpel_pixels_tab;
        }
        if (s->mv_dir & MV_DIR_BACKWARD) {
            ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
                          s->next_picture.f.data,
                          op_pix, op_qpix);
        }

        if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
            int progressive_score, interlaced_score;

            /* same frame/field DCT decision as in the intra path, but on
             * the residual (source vs prediction) */
            s->interlaced_dct = 0;
            progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
                                                    ptr_y,              wrap_y,
                                                    8) +
                                s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
                                                    ptr_y + wrap_y * 8, wrap_y,
                                                    8) - 400;

            if (s->avctx->ildct_cmp == FF_CMP_VSSE)
                progressive_score -= 400;

            if (progressive_score > 0) {
                interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
                                                       ptr_y,
                                                       wrap_y * 2, 8) +
                                   s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
                                                       ptr_y + wrap_y,
                                                       wrap_y * 2, 8);

                if (progressive_score > interlaced_score) {
                    s->interlaced_dct = 1;

                    dct_offset = wrap_y;
                    wrap_y <<= 1;
                    if (s->chroma_format == CHROMA_422)
                        wrap_c <<= 1;
                }
            }
        }

        /* compute the residual blocks */
        s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
        s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
        s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
                           dest_y + dct_offset, wrap_y);
        s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
                           dest_y + dct_offset + 8, wrap_y);

        if (s->flags & CODEC_FLAG_GRAY) {
            skip_dct[4] = 1;
            skip_dct[5] = 1;
        } else {
            s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
            s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
            if (!s->chroma_y_shift) {
                s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
                                   dest_cb + (dct_offset >> 1), wrap_c);
                s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
                                   dest_cr + (dct_offset >> 1), wrap_c);
            }
        }

        /* low-variance MB: skip the DCT for blocks whose residual SAD is
         * small relative to the quantizer */
        if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
                2 * s->qscale * s->qscale) {
            if (s->dsp.sad[1](NULL, ptr_y , dest_y,
                              wrap_y, 8) < 20 * s->qscale)
                skip_dct[0] = 1;
            if (s->dsp.sad[1](NULL, ptr_y + 8,
                              dest_y + 8, wrap_y, 8) < 20 * s->qscale)
                skip_dct[1] = 1;
            if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
                              dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
                skip_dct[2] = 1;
            if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
                              dest_y + dct_offset + 8,
                              wrap_y, 8) < 20 * s->qscale)
                skip_dct[3] = 1;
            if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
                              wrap_c, 8) < 20 * s->qscale)
                skip_dct[4] = 1;
            if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
                              wrap_c, 8) < 20 * s->qscale)
                skip_dct[5] = 1;
            if (!s->chroma_y_shift) {
                if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
                                  dest_cb + (dct_offset >> 1),
                                  wrap_c, 8) < 20 * s->qscale)
                    skip_dct[6] = 1;
                if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
                                  dest_cr + (dct_offset >> 1),
                                  wrap_c, 8) < 20 * s->qscale)
                    skip_dct[7] = 1;
            }
        }
    }

    if (s->quantizer_noise_shaping) {
        /* visual weights and a copy of the unquantized coefficients are
         * needed by dct_quantize_refine() below */
        if (!skip_dct[0])
            get_visual_weight(weight[0], ptr_y                 , wrap_y);
        if (!skip_dct[1])
            get_visual_weight(weight[1], ptr_y              + 8, wrap_y);
        if (!skip_dct[2])
            get_visual_weight(weight[2], ptr_y + dct_offset    , wrap_y);
        if (!skip_dct[3])
            get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
        if (!skip_dct[4])
            get_visual_weight(weight[4], ptr_cb                , wrap_c);
        if (!skip_dct[5])
            get_visual_weight(weight[5], ptr_cr                , wrap_c);
        if (!s->chroma_y_shift) {
            if (!skip_dct[6])
                get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
                                  wrap_c);
            if (!skip_dct[7])
                get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
                                  wrap_c);
        }
        memcpy(orig[0], s->block[0], sizeof(DCTELEM) * 64 * mb_block_count);
    }

    /* DCT & quantize */
    av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
    {
        for (i = 0; i < mb_block_count; i++) {
            if (!skip_dct[i]) {
                int overflow;
                s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
                /* clip the coefficients only on overflow to keep the
                 * common case fast */
                if (overflow)
                    clip_coeffs(s, s->block[i], s->block_last_index[i]);
            } else
                s->block_last_index[i] = -1;
        }
        if (s->quantizer_noise_shaping) {
            for (i = 0; i < mb_block_count; i++) {
                if (!skip_dct[i]) {
                    s->block_last_index[i] =
                        dct_quantize_refine(s, s->block[i], weight[i],
                                            orig[i], i, s->qscale);
                }
            }
        }

        if (s->luma_elim_threshold && !s->mb_intra)
            for (i = 0; i < 4; i++)
                dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
        if (s->chroma_elim_threshold && !s->mb_intra)
            for (i = 4; i < mb_block_count; i++)
                dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);

        if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
            for (i = 0; i < mb_block_count; i++) {
                if (s->block_last_index[i] == -1)
                    s->coded_score[i] = INT_MAX / 256;
            }
        }
    }

    if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
        /* grayscale intra MB: code flat mid-gray chroma DC only */
        s->block_last_index[4] =
        s->block_last_index[5] = 0;
        s->block[4][0] =
        s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
    }

    /* non-C quantizers may report a too large last_index with the
     * alternate scan; recompute it here */
    if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
        for (i = 0; i < mb_block_count; i++) {
            int j;
            if (s->block_last_index[i] > 0) {
                for (j = 63; j > 0; j--) {
                    if (s->block[i][s->intra_scantable.permutated[j]])
                        break;
                }
                s->block_last_index[i] = j;
            }
        }
    }

    /* hand the quantized blocks to the codec-specific bitstream writer */
    switch(s->codec_id){
    case AV_CODEC_ID_MPEG1VIDEO:
    case AV_CODEC_ID_MPEG2VIDEO:
        if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
            ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_MPEG4:
        if (CONFIG_MPEG4_ENCODER)
            ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_MSMPEG4V2:
    case AV_CODEC_ID_MSMPEG4V3:
    case AV_CODEC_ID_WMV1:
        if (CONFIG_MSMPEG4_ENCODER)
            ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_WMV2:
        if (CONFIG_WMV2_ENCODER)
            ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_H261:
        if (CONFIG_H261_ENCODER)
            ff_h261_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_H263:
    case AV_CODEC_ID_H263P:
    case AV_CODEC_ID_FLV1:
    case AV_CODEC_ID_RV10:
    case AV_CODEC_ID_RV20:
        if (CONFIG_H263_ENCODER)
            ff_h263_encode_mb(s, s->block, motion_x, motion_y);
        break;
    case AV_CODEC_ID_MJPEG:
    case AV_CODEC_ID_AMV:
        if (CONFIG_MJPEG_ENCODER)
            ff_mjpeg_encode_mb(s, s->block);
        break;
    default:
        av_assert1(0);
    }
}
02156
02157 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
02158 {
02159 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
02160 else encode_mb_internal(s, motion_x, motion_y, 16, 8);
02161 }
02162
02163 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
02164 int i;
02165
02166 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int));
02167
02168
02169 d->mb_skip_run= s->mb_skip_run;
02170 for(i=0; i<3; i++)
02171 d->last_dc[i] = s->last_dc[i];
02172
02173
02174 d->mv_bits= s->mv_bits;
02175 d->i_tex_bits= s->i_tex_bits;
02176 d->p_tex_bits= s->p_tex_bits;
02177 d->i_count= s->i_count;
02178 d->f_count= s->f_count;
02179 d->b_count= s->b_count;
02180 d->skip_count= s->skip_count;
02181 d->misc_bits= s->misc_bits;
02182 d->last_bits= 0;
02183
02184 d->mb_skipped= 0;
02185 d->qscale= s->qscale;
02186 d->dquant= s->dquant;
02187
02188 d->esc3_level_length= s->esc3_level_length;
02189 }
02190
02191 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
02192 int i;
02193
02194 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
02195 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int));
02196
02197
02198 d->mb_skip_run= s->mb_skip_run;
02199 for(i=0; i<3; i++)
02200 d->last_dc[i] = s->last_dc[i];
02201
02202
02203 d->mv_bits= s->mv_bits;
02204 d->i_tex_bits= s->i_tex_bits;
02205 d->p_tex_bits= s->p_tex_bits;
02206 d->i_count= s->i_count;
02207 d->f_count= s->f_count;
02208 d->b_count= s->b_count;
02209 d->skip_count= s->skip_count;
02210 d->misc_bits= s->misc_bits;
02211
02212 d->mb_intra= s->mb_intra;
02213 d->mb_skipped= s->mb_skipped;
02214 d->mv_type= s->mv_type;
02215 d->mv_dir= s->mv_dir;
02216 d->pb= s->pb;
02217 if(s->data_partitioning){
02218 d->pb2= s->pb2;
02219 d->tex_pb= s->tex_pb;
02220 }
02221 d->block= s->block;
02222 for(i=0; i<8; i++)
02223 d->block_last_index[i]= s->block_last_index[i];
02224 d->interlaced_dct= s->interlaced_dct;
02225 d->qscale= s->qscale;
02226
02227 d->esc3_level_length= s->esc3_level_length;
02228 }
02229
/**
 * Try encoding the current macroblock with the given candidate parameters
 * and keep it if it is cheaper than the best candidate so far.
 *
 * The candidate is written into pb/pb2/tex_pb[*next_block] (double-buffered
 * scratch bitstreams).  Its score is the bit count, or for full RD mode
 * bits*lambda2 + SSE.  On improvement the score replaces *dmin, the encoder
 * state is copied into *best and *next_block is toggled so the winning
 * buffer is not overwritten by the next trial.  The type tag is forwarded
 * to the copy_context_* helpers (which currently ignore it).
 */
static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
                           PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
                           int *dmin, int *next_block, int motion_x, int motion_y)
{
    int score;
    uint8_t *dest_backup[3];

    /* restore the pre-trial state */
    copy_context_before_encode(s, backup, type);

    s->block= s->blocks[*next_block];
    s->pb= pb[*next_block];
    if(s->data_partitioning){
        s->pb2   = pb2   [*next_block];
        s->tex_pb= tex_pb[*next_block];
    }

    if(*next_block){
        /* redirect reconstruction output into the scratchpad so the current
         * best candidate's pixels in s->dest stay intact */
        memcpy(dest_backup, s->dest, sizeof(s->dest));
        s->dest[0] = s->rd_scratchpad;
        s->dest[1] = s->rd_scratchpad + 16*s->linesize;
        s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
        assert(s->linesize >= 32); /* scratchpad layout assumes this */
    }

    encode_mb(s, motion_x, motion_y);

    score= put_bits_count(&s->pb);
    if(s->data_partitioning){
        score+= put_bits_count(&s->pb2);
        score+= put_bits_count(&s->tex_pb);
    }

    if(s->avctx->mb_decision == FF_MB_DECISION_RD){
        /* full rate-distortion: reconstruct and add the distortion term */
        ff_MPV_decode_mb(s, s->block);

        score *= s->lambda2;
        score += sse_mb(s) << FF_LAMBDA_SHIFT;
    }

    if(*next_block){
        memcpy(s->dest, dest_backup, sizeof(s->dest));
    }

    if(score<*dmin){
        *dmin= score;
        *next_block^=1;

        copy_context_after_encode(best, s, type);
    }
}
02280
02281 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
02282 uint32_t *sq = ff_squareTbl + 256;
02283 int acc=0;
02284 int x,y;
02285
02286 if(w==16 && h==16)
02287 return s->dsp.sse[0](NULL, src1, src2, stride, 16);
02288 else if(w==8 && h==8)
02289 return s->dsp.sse[1](NULL, src1, src2, stride, 8);
02290
02291 for(y=0; y<h; y++){
02292 for(x=0; x<w; x++){
02293 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
02294 }
02295 }
02296
02297 av_assert2(acc>=0);
02298
02299 return acc;
02300 }
02301
02302 static int sse_mb(MpegEncContext *s){
02303 int w= 16;
02304 int h= 16;
02305
02306 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
02307 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
02308
02309 if(w==16 && h==16)
02310 if(s->avctx->mb_cmp == FF_CMP_NSSE){
02311 return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
02312 +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
02313 +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
02314 }else{
02315 return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
02316 +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
02317 +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
02318 }
02319 else
02320 return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
02321 +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
02322 +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
02323 }
02324
02325 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
02326 MpegEncContext *s= *(void**)arg;
02327
02328
02329 s->me.pre_pass=1;
02330 s->me.dia_size= s->avctx->pre_dia_size;
02331 s->first_slice_line=1;
02332 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
02333 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
02334 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
02335 }
02336 s->first_slice_line=0;
02337 }
02338
02339 s->me.pre_pass=0;
02340
02341 return 0;
02342 }
02343
/**
 * Slice worker: run motion estimation for every macroblock of this
 * thread's slice (rows start_mb_y..end_mb_y-1).
 * B-frames use bidirectional estimation, everything else the P-frame path;
 * results land in the context's motion tables via the callees.
 * @return 0 (always succeeds)
 */
static int estimate_motion_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s= *(void**)arg;

    ff_check_alignment();

    s->me.dia_size= s->avctx->dia_size;
    s->first_slice_line=1;
    for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x=0;
        ff_init_block_index(s);
        for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
            /* advance the luma block indices by one macroblock (2 columns) */
            s->block_index[0]+=2;
            s->block_index[1]+=2;
            s->block_index[2]+=2;
            s->block_index[3]+=2;

            /* compute motion vector & mb_type and store in context */
            if(s->pict_type==AV_PICTURE_TYPE_B)
                ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
            else
                ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
        }
        s->first_slice_line=0;
    }
    return 0;
}
02370
02371 static int mb_var_thread(AVCodecContext *c, void *arg){
02372 MpegEncContext *s= *(void**)arg;
02373 int mb_x, mb_y;
02374
02375 ff_check_alignment();
02376
02377 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
02378 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
02379 int xx = mb_x * 16;
02380 int yy = mb_y * 16;
02381 uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
02382 int varc;
02383 int sum = s->dsp.pix_sum(pix, s->linesize);
02384
02385 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
02386
02387 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
02388 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
02389 s->me.mb_var_sum_temp += varc;
02390 }
02391 }
02392 return 0;
02393 }
02394
/**
 * Terminate the current slice: merge MPEG-4 data partitions and write
 * stuffing bits (or MJPEG stuffing), then byte-align and flush the
 * bitstream writer.
 */
static void write_slice_end(MpegEncContext *s){
    if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
        if(s->partitioned_frame){
            ff_mpeg4_merge_partitions(s);
        }

        ff_mpeg4_stuffing(&s->pb);
    }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
        ff_mjpeg_encode_stuffing(s);
    }

    avpriv_align_put_bits(&s->pb);
    flush_put_bits(&s->pb);

    /* for pass-1 statistics, alignment/stuffing bits count as misc bits */
    if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
        s->misc_bits+= get_bits_diff(s);
}
02412
/**
 * Fill the most recently reserved 12-byte macroblock info record
 * (at the end of s->mb_info_ptr): bitstream bit offset, qscale, GOB
 * number, macroblock address within the GOB and the MV predictors.
 * Used by the mb_info mechanism driven from update_mb_info().
 */
static void write_mb_info(MpegEncContext *s)
{
    uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
    int offset = put_bits_count(&s->pb);
    int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
    int gobn = s->mb_y / s->gob_index;
    int pred_x, pred_y;
    if (CONFIG_H263_ENCODER)
        ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
    bytestream_put_le32(&ptr, offset);
    bytestream_put_byte(&ptr, s->qscale);
    bytestream_put_byte(&ptr, gobn);
    bytestream_put_le16(&ptr, mba);
    bytestream_put_byte(&ptr, pred_x);
    bytestream_put_byte(&ptr, pred_y);
    /* 4 bytes total, two bytes of zero padding to reach 12 */
    bytestream_put_byte(&ptr, 0);
    bytestream_put_byte(&ptr, 0);
}
02432
/**
 * Maintain the macroblock info side records: reserve a new 12-byte slot
 * whenever s->mb_info bytes of bitstream were produced since the previous
 * record, and write the record for the current position.
 * With startcode set, only resynchronize the byte counters and return —
 * the upcoming startcode itself marks the split point.
 */
static void update_mb_info(MpegEncContext *s, int startcode)
{
    if (!s->mb_info)
        return;
    if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
        s->mb_info_size += 12;
        s->prev_mb_info = s->last_mb_info;
    }
    if (startcode) {
        s->prev_mb_info = put_bits_count(&s->pb)/8;
        /* A slot may have been reserved above without being written; it
         * will be consumed by the next non-startcode call. */
        return;
    }

    s->last_mb_info = put_bits_count(&s->pb)/8;
    if (!s->mb_info_size)
        s->mb_info_size += 12;
    write_mb_info(s);
}
02455
/**
 * Slice worker: encode all macroblocks of this thread's slice
 * (rows start_mb_y..end_mb_y-1) into s->pb.
 *
 * Handles slice/GOB/video-packet resynchronization (rtp_mode), and when
 * several candidate MB types were collected during motion estimation (or
 * QP-RD is on) tries each candidate via encode_mb_hq() and keeps the
 * cheapest; otherwise encodes the single decided MB type directly.
 *
 * @return 0 on success, -1 if the output buffer would overflow.
 */
static int encode_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s= *(void**)arg;
    int mb_x, mb_y, pdif = 0;
    int chr_h= 16>>s->chroma_y_shift;
    int i, j;
    MpegEncContext best_s, backup_s;
    /* double-buffered scratch bitstreams for competing MB-type trials */
    uint8_t bit_buf[2][MAX_MB_BYTES];
    uint8_t bit_buf2[2][MAX_MB_BYTES];
    uint8_t bit_buf_tex[2][MAX_MB_BYTES];
    PutBitContext pb[2], pb2[2], tex_pb[2];

    ff_check_alignment();

    for(i=0; i<2; i++){
        init_put_bits(&pb    [i], bit_buf    [i], MAX_MB_BYTES);
        init_put_bits(&pb2   [i], bit_buf2   [i], MAX_MB_BYTES);
        init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
    }

    /* reset per-slice bit statistics */
    s->last_bits= put_bits_count(&s->pb);
    s->mv_bits=0;
    s->misc_bits=0;
    s->i_tex_bits=0;
    s->p_tex_bits=0;
    s->i_count=0;
    s->f_count=0;
    s->b_count=0;
    s->skip_count=0;

    for(i=0; i<3; i++){
        /* init last dc values */
        s->last_dc[i] = 128 << s->intra_dc_precision;

        s->current_picture.f.error[i] = 0;
    }
    if(s->codec_id==AV_CODEC_ID_AMV){
        /* AMV uses its own DC predictor reset values */
        s->last_dc[0] = 128*8/13;
        s->last_dc[1] = 128*8/14;
        s->last_dc[2] = 128*8/14;
    }
    s->mb_skip_run = 0;
    memset(s->last_mv, 0, sizeof(s->last_mv));

    s->last_mv_dir = 0;

    switch(s->codec_id){
    case AV_CODEC_ID_H263:
    case AV_CODEC_ID_H263P:
    case AV_CODEC_ID_FLV1:
        if (CONFIG_H263_ENCODER)
            s->gob_index = ff_h263_get_gob_height(s);
        break;
    case AV_CODEC_ID_MPEG4:
        if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
            ff_mpeg4_init_partitions(s);
        break;
    }

    s->resync_mb_x=0;
    s->resync_mb_y=0;
    s->first_slice_line = 1;
    s->ptr_lastgob = s->pb.buf;
    for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
        s->mb_x=0;
        s->mb_y= mb_y;

        ff_set_qscale(s, s->qscale);
        ff_init_block_index(s);

        for(mb_x=0; mb_x < s->mb_width; mb_x++) {
            int xy= mb_y*s->mb_stride + mb_x;
            int mb_type= s->mb_type[xy];

            int dmin= INT_MAX;
            int dir;

            /* bail out if even a maximum-size MB could overflow the buffer */
            if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
                return -1;
            }
            if(s->data_partitioning){
                if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
                   || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
                    av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
                    return -1;
                }
            }

            s->mb_x = mb_x;
            s->mb_y = mb_y;   /* may be changed below for H.261 */
            ff_update_block_index(s);

            if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
                ff_h261_reorder_mb_index(s);
                xy= s->mb_y*s->mb_stride + s->mb_x;
                mb_type= s->mb_type[xy];
            }

            /* write GOB / video-packet / slice headers when needed */
            if(s->rtp_mode){
                int current_packet_size, is_gob_start;

                current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);

                is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;

                if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;

                switch(s->codec_id){
                case AV_CODEC_ID_H263:
                case AV_CODEC_ID_H263P:
                    if(!s->h263_slice_structured)
                        if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
                    break;
                case AV_CODEC_ID_MPEG2VIDEO:
                    if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
                    /* fallthrough */
                case AV_CODEC_ID_MPEG1VIDEO:
                    if(s->mb_skip_run) is_gob_start=0;
                    break;
                case AV_CODEC_ID_MJPEG:
                    if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
                    break;
                }

                if(is_gob_start){
                    if(s->start_mb_y != mb_y || mb_x!=0){
                        write_slice_end(s);
                        if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
                            ff_mpeg4_init_partitions(s);
                        }
                    }

                    av_assert2((put_bits_count(&s->pb)&7) == 0);
                    current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;

                    /* error_rate testing: periodically discard the packet
                     * by rewinding the bitstream pointer */
                    if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
                        int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
                        int d= 100 / s->avctx->error_rate;
                        if(r % d == 0){
                            current_packet_size=0;
                            s->pb.buf_ptr= s->ptr_lastgob;
                            assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
                        }
                    }

                    if (s->avctx->rtp_callback){
                        int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
                        s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
                    }
                    update_mb_info(s, 1);

                    switch(s->codec_id){
                    case AV_CODEC_ID_MPEG4:
                        if (CONFIG_MPEG4_ENCODER) {
                            ff_mpeg4_encode_video_packet_header(s);
                            ff_mpeg4_clean_buffers(s);
                        }
                    break;
                    case AV_CODEC_ID_MPEG1VIDEO:
                    case AV_CODEC_ID_MPEG2VIDEO:
                        if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
                            ff_mpeg1_encode_slice_header(s);
                            ff_mpeg1_clean_buffers(s);
                        }
                    break;
                    case AV_CODEC_ID_H263:
                    case AV_CODEC_ID_H263P:
                        if (CONFIG_H263_ENCODER)
                            ff_h263_encode_gob_header(s, mb_y);
                    break;
                    }

                    if(s->flags&CODEC_FLAG_PASS1){
                        int bits= put_bits_count(&s->pb);
                        s->misc_bits+= bits - s->last_bits;
                        s->last_bits= bits;
                    }

                    s->ptr_lastgob += current_packet_size;
                    s->first_slice_line=1;
                    s->resync_mb_x=mb_x;
                    s->resync_mb_y=mb_y;
                }
            }

            if(  (s->resync_mb_x   == s->mb_x)
               && s->resync_mb_y+1 == s->mb_y){
                s->first_slice_line=0;
            }

            s->mb_skipped=0;
            s->dquant=0;

            update_mb_info(s, 0);

            if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { /* more than one candidate MB type, or QP-RD */
                int next_block=0;
                int pb_bits_count, pb2_bits_count, tex_pb_bits_count;

                copy_context_before_encode(&backup_s, s, -1);
                backup_s.pb= s->pb;
                best_s.data_partitioning= s->data_partitioning;
                best_s.partitioned_frame= s->partitioned_frame;
                if(s->data_partitioning){
                    backup_s.pb2= s->pb2;
                    backup_s.tex_pb= s->tex_pb;
                }

                /* try each candidate MB type; encode_mb_hq() keeps the cheapest */
                if(mb_type&CANDIDATE_MB_TYPE_INTER){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra= 0;
                    s->mv[0][0][0] = s->p_mv_table[xy][0];
                    s->mv[0][0][1] = s->p_mv_table[xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
                }
                if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    s->mb_intra= 0;
                    for(i=0; i<2; i++){
                        j= s->field_select[0][i] = s->p_field_select_table[i][xy];
                        s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
                        s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
                    }
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra= 0;
                    s->mv[0][0][0] = 0;
                    s->mv[0][0][1] = 0;
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
                }
                if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_8X8;
                    s->mb_intra= 0;
                    for(i=0; i<4; i++){
                        s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
                        s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
                    }
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra= 0;
                    s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
                    s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
                }
                if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
                    s->mv_dir = MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra= 0;
                    s->mv[1][0][0] = s->b_back_mv_table[xy][0];
                    s->mv[1][0][1] = s->b_back_mv_table[xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
                }
                if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra= 0;
                    s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
                    s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
                    s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
                    s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    s->mb_intra= 0;
                    for(i=0; i<2; i++){
                        j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
                        s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
                        s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
                    }
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
                    s->mv_dir = MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    s->mb_intra= 0;
                    for(i=0; i<2; i++){
                        j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
                        s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
                        s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
                    }
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    s->mb_intra= 0;
                    for(dir=0; dir<2; dir++){
                        for(i=0; i<2; i++){
                            j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
                            s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
                            s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
                        }
                    }
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                if(mb_type&CANDIDATE_MB_TYPE_INTRA){
                    s->mv_dir = 0;
                    s->mv_type = MV_TYPE_16X16;
                    s->mb_intra= 1;
                    s->mv[0][0][0] = 0;
                    s->mv[0][0][1] = 0;
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                    if(s->h263_pred || s->h263_aic){
                        if(best_s.mb_intra)
                            s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
                        else
                            ff_clean_intra_table_entries(s); /* best so far is inter */
                    }
                }

                /* QP-RD: also try neighboring quantizers around the best MV result */
                if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
                    if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
                        const int last_qp= backup_s.qscale;
                        int qpi, qp, dc[6];
                        DCTELEM ac[6][16];
                        const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
                        static const int dquant_tab[4]={-1,1,-2,2};

                        av_assert2(backup_s.dquant == 0);

                        /* restore the best candidate's MV decision */
                        s->mv_dir= best_s.mv_dir;
                        s->mv_type = MV_TYPE_16X16;
                        s->mb_intra= best_s.mb_intra;
                        s->mv[0][0][0] = best_s.mv[0][0][0];
                        s->mv[0][0][1] = best_s.mv[0][0][1];
                        s->mv[1][0][0] = best_s.mv[1][0][0];
                        s->mv[1][0][1] = best_s.mv[1][0][1];

                        qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
                        for(; qpi<4; qpi++){
                            int dquant= dquant_tab[qpi];
                            qp= last_qp + dquant;
                            if(qp < s->avctx->qmin || qp > s->avctx->qmax)
                                continue;
                            backup_s.dquant= dquant;
                            if(s->mb_intra && s->dc_val[0]){
                                /* save AC/DC predictors so a rejected trial
                                 * can restore them below */
                                for(i=0; i<6; i++){
                                    dc[i]= s->dc_val[0][ s->block_index[i] ];
                                    memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(DCTELEM)*16);
                                }
                            }

                            encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
                                         &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
                            if(best_s.qscale != qp){
                                if(s->mb_intra && s->dc_val[0]){
                                    for(i=0; i<6; i++){
                                        s->dc_val[0][ s->block_index[i] ]= dc[i];
                                        memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16);
                                    }
                                }
                            }
                        }
                    }
                }
                if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
                    int mx= s->b_direct_mv_table[xy][0];
                    int my= s->b_direct_mv_table[xy][1];

                    backup_s.dquant = 0;
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
                    s->mb_intra= 0;
                    ff_mpeg4_set_direct_mv(s, mx, my);
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
                                 &dmin, &next_block, mx, my);
                }
                if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
                    backup_s.dquant = 0;
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
                    s->mb_intra= 0;
                    ff_mpeg4_set_direct_mv(s, 0, 0);
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                }
                /* SKIP-RD: retry the best inter candidate with the DCT
                 * forced off, in case skipping residual is cheaper */
                if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
                    int coded=0;
                    for(i=0; i<6; i++)
                        coded |= s->block_last_index[i];
                    if(coded){
                        int mx,my;
                        memcpy(s->mv, best_s.mv, sizeof(s->mv));
                        if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
                            mx=my=0; //FIXME find the one we actually used
                            ff_mpeg4_set_direct_mv(s, mx, my);
                        }else if(best_s.mv_dir&MV_DIR_BACKWARD){
                            mx= s->mv[1][0][0];
                            my= s->mv[1][0][1];
                        }else{
                            mx= s->mv[0][0][0];
                            my= s->mv[0][0][1];
                        }

                        s->mv_dir= best_s.mv_dir;
                        s->mv_type = best_s.mv_type;
                        s->mb_intra= 0;
                        backup_s.dquant= 0;
                        s->skipdct=1;
                        encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
                                     &dmin, &next_block, mx, my);
                        s->skipdct=0;
                    }
                }

                s->current_picture.f.qscale_table[xy] = best_s.qscale;

                /* adopt the winning candidate's state */
                copy_context_after_encode(s, &best_s, -1);

                /* append the winning scratch bitstream to the real one */
                pb_bits_count= put_bits_count(&s->pb);
                flush_put_bits(&s->pb);
                avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
                s->pb= backup_s.pb;

                if(s->data_partitioning){
                    pb2_bits_count= put_bits_count(&s->pb2);
                    flush_put_bits(&s->pb2);
                    avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
                    s->pb2= backup_s.pb2;

                    tex_pb_bits_count= put_bits_count(&s->tex_pb);
                    flush_put_bits(&s->tex_pb);
                    avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
                    s->tex_pb= backup_s.tex_pb;
                }
                s->last_bits= put_bits_count(&s->pb);

                if (CONFIG_H263_ENCODER &&
                    s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
                    ff_h263_update_motion_val(s);

                if(next_block==0){ /* winning reconstruction is in the scratchpad */
                    s->dsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad                     , s->linesize  ,16);
                    s->dsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize    , s->uvlinesize, 8);
                    s->dsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
                }

                if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
                    ff_MPV_decode_mb(s, s->block);
            } else {
                int motion_x = 0, motion_y = 0;
                s->mv_type=MV_TYPE_16X16;
                /* only one MB type was decided during motion estimation */
                switch(mb_type){
                case CANDIDATE_MB_TYPE_INTRA:
                    s->mv_dir = 0;
                    s->mb_intra= 1;
                    motion_x= s->mv[0][0][0] = 0;
                    motion_y= s->mv[0][0][1] = 0;
                    break;
                case CANDIDATE_MB_TYPE_INTER:
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mb_intra= 0;
                    motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
                    motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
                    break;
                case CANDIDATE_MB_TYPE_INTER_I:
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    s->mb_intra= 0;
                    for(i=0; i<2; i++){
                        j= s->field_select[0][i] = s->p_field_select_table[i][xy];
                        s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
                        s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
                    }
                    break;
                case CANDIDATE_MB_TYPE_INTER4V:
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_8X8;
                    s->mb_intra= 0;
                    for(i=0; i<4; i++){
                        s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
                        s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
                    }
                    break;
                case CANDIDATE_MB_TYPE_DIRECT:
                    if (CONFIG_MPEG4_ENCODER) {
                        s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
                        s->mb_intra= 0;
                        motion_x=s->b_direct_mv_table[xy][0];
                        motion_y=s->b_direct_mv_table[xy][1];
                        ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
                    }
                    break;
                case CANDIDATE_MB_TYPE_DIRECT0:
                    if (CONFIG_MPEG4_ENCODER) {
                        s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
                        s->mb_intra= 0;
                        ff_mpeg4_set_direct_mv(s, 0, 0);
                    }
                    break;
                case CANDIDATE_MB_TYPE_BIDIR:
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
                    s->mb_intra= 0;
                    s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
                    s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
                    s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
                    s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
                    break;
                case CANDIDATE_MB_TYPE_BACKWARD:
                    s->mv_dir = MV_DIR_BACKWARD;
                    s->mb_intra= 0;
                    motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
                    motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
                    break;
                case CANDIDATE_MB_TYPE_FORWARD:
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mb_intra= 0;
                    motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
                    motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
                    break;
                case CANDIDATE_MB_TYPE_FORWARD_I:
                    s->mv_dir = MV_DIR_FORWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    s->mb_intra= 0;
                    for(i=0; i<2; i++){
                        j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
                        s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
                        s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
                    }
                    break;
                case CANDIDATE_MB_TYPE_BACKWARD_I:
                    s->mv_dir = MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    s->mb_intra= 0;
                    for(i=0; i<2; i++){
                        j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
                        s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
                        s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
                    }
                    break;
                case CANDIDATE_MB_TYPE_BIDIR_I:
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
                    s->mv_type = MV_TYPE_FIELD;
                    s->mb_intra= 0;
                    for(dir=0; dir<2; dir++){
                        for(i=0; i<2; i++){
                            j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
                            s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
                            s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
                        }
                    }
                    break;
                default:
                    av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
                }

                encode_mb(s, motion_x, motion_y);

                /* remember the last MV direction */
                s->last_mv_dir = s->mv_dir;

                if (CONFIG_H263_ENCODER &&
                    s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
                    ff_h263_update_motion_val(s);

                ff_MPV_decode_mb(s, s->block);
            }

            /* clean the MV table in intra MBs (used as candidates later) */
            if(s->mb_intra ){
                s->p_mv_table[xy][0]=0;
                s->p_mv_table[xy][1]=0;
            }

            if(s->flags&CODEC_FLAG_PSNR){
                int w= 16;
                int h= 16;

                if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
                if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;

                s->current_picture.f.error[0] += sse(
                    s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
                    s->dest[0], w, h, s->linesize);
                s->current_picture.f.error[1] += sse(
                    s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
                    s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
                s->current_picture.f.error[2] += sse(
                    s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*chr_h,
                    s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
            }
            if(s->loop_filter){
                if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
                    ff_h263_loop_filter(s);
            }

        }
    }

    /* the last possibly-incomplete GOB might need an extension header */
    if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
        ff_msmpeg4_encode_ext_header(s);

    write_slice_end(s);

    /* deliver the final GOB of the slice through the RTP callback */
    if (s->avctx->rtp_callback) {
        int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
        pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;

        emms_c();
        s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
    }

    return 0;
}
03093
03094 #define MERGE(field) dst->field += src->field; src->field=0
03095 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
03096 MERGE(me.scene_change_score);
03097 MERGE(me.mc_mb_var_sum_temp);
03098 MERGE(me.mb_var_sum_temp);
03099 }
03100
03101 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
03102 int i;
03103
03104 MERGE(dct_count[0]);
03105 MERGE(dct_count[1]);
03106 MERGE(mv_bits);
03107 MERGE(i_tex_bits);
03108 MERGE(p_tex_bits);
03109 MERGE(i_count);
03110 MERGE(f_count);
03111 MERGE(b_count);
03112 MERGE(skip_count);
03113 MERGE(misc_bits);
03114 MERGE(error_count);
03115 MERGE(padding_bug_score);
03116 MERGE(current_picture.f.error[0]);
03117 MERGE(current_picture.f.error[1]);
03118 MERGE(current_picture.f.error[2]);
03119
03120 if(dst->avctx->noise_reduction){
03121 for(i=0; i<64; i++){
03122 MERGE(dct_error_sum[0][i]);
03123 MERGE(dct_error_sum[1][i]);
03124 }
03125 }
03126
03127 assert(put_bits_count(&src->pb) % 8 ==0);
03128 assert(put_bits_count(&dst->pb) % 8 ==0);
03129 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
03130 flush_put_bits(&dst->pb);
03131 }
03132
/**
 * Pick the quantizer / lambda for the current picture.
 * @param dry_run if nonzero, state that must survive until the real run
 *                (next_lambda) is left untouched
 * @return 0 on success, -1 if the rate controller fails
 */
static int estimate_qp(MpegEncContext *s, int dry_run){
    if (s->next_lambda){
        /* an explicitly requested lambda overrides ratecontrol */
        s->current_picture_ptr->f.quality =
        s->current_picture.f.quality = s->next_lambda;
        if(!dry_run) s->next_lambda= 0;
    } else if (!s->fixed_qscale) {
        /* ask the ratecontrol module for a quality value */
        s->current_picture_ptr->f.quality =
        s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
        if (s->current_picture.f.quality < 0)
            return -1;
    }

    if(s->adaptive_quant){
        /* per-MB quantizers: let the codec-specific code clamp the qscale
         * table to what its bitstream syntax can actually represent */
        switch(s->codec_id){
        case AV_CODEC_ID_MPEG4:
            if (CONFIG_MPEG4_ENCODER)
                ff_clean_mpeg4_qscales(s);
            break;
        case AV_CODEC_ID_H263:
        case AV_CODEC_ID_H263P:
        case AV_CODEC_ID_FLV1:
            if (CONFIG_H263_ENCODER)
                ff_clean_h263_qscales(s);
            break;
        default:
            ff_init_qscale_tab(s);
        }

        /* lambda of the first MB; per-MB lambdas presumably come from
         * lambda_table during encoding — verify against encode_thread */
        s->lambda= s->lambda_table[0];

    }else
        s->lambda = s->current_picture.f.quality;

    update_qscale(s);
    return 0;
}
03169
03170
/**
 * Update the temporal distance bookkeeping for the current picture, in units
 * of avctx->time_base: pp_time is the distance between the two surrounding
 * non-B pictures, pb_time the distance from the previous non-B picture to
 * this B picture. Requires a valid pts on the current picture.
 */
static void set_frame_distances(MpegEncContext * s){
    assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
    s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;

    if(s->pict_type==AV_PICTURE_TYPE_B){
        s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
        /* a B frame must lie strictly between its two references */
        assert(s->pb_time > 0 && s->pb_time < s->pp_time);
    }else{
        s->pp_time= s->time - s->last_non_b_time;
        s->last_non_b_time= s->time;
        assert(s->picture_number==0 || s->pp_time > 0);
    }
}
03184
/**
 * Encode one picture: run motion estimation across the slice threads, pick
 * fcodes and picture type, set up quantization matrices, write the picture
 * header, and dispatch the per-slice encode threads.
 * @return 0 on success, negative on ratecontrol / ME init failure
 */
static int encode_picture(MpegEncContext *s, int picture_number)
{
    int i;
    int bits;
    int context_count = s->slice_context_count;

    s->picture_number = picture_number;

    /* Reset statistics that the slice threads accumulate; merged again
     * below via merge_context_after_me(). */
    s->me.mb_var_sum_temp =
    s->me.mc_mb_var_sum_temp = 0;

    /* we need to initialize some time vars before we can encode B-frames */
    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
        set_frame_distances(s);
    if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
        ff_set_mpeg4_time(s);

    s->me.scene_change_score=0;

    /* no_rounding: alternate the MC rounding between successive P frames
     * for codecs that signal it (MSMPEG4 v3+, H.263+, MPEG-4) */
    if(s->pict_type==AV_PICTURE_TYPE_I){
        if(s->msmpeg4_version >= 3) s->no_rounding=1;
        else s->no_rounding=0;
    }else if(s->pict_type!=AV_PICTURE_TYPE_B){
        if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
            s->no_rounding ^= 1;
    }

    if(s->flags & CODEC_FLAG_PASS2){
        /* second ratecontrol pass: quality and fcodes come from the log */
        if (estimate_qp(s,1) < 0)
            return -1;
        ff_get_2pass_fcode(s);
    }else if(!(s->flags & CODEC_FLAG_QSCALE)){
        /* start from the lambda used for the last picture of the same kind */
        if(s->pict_type==AV_PICTURE_TYPE_B)
            s->lambda= s->last_lambda_for[s->pict_type];
        else
            s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
        update_qscale(s);
    }

    /* except for AMV, chroma shares the luma intra matrices; drop any
     * separately allocated chroma copies before aliasing the pointers */
    if(s->codec_id != AV_CODEC_ID_AMV){
        if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
        if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
        s->q_chroma_intra_matrix = s->q_intra_matrix;
        s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
    }

    s->mb_intra=0; //for the rate distortion & bit compare functions
    for(i=1; i<context_count; i++){
        ff_update_duplicate_context(s->thread_context[i], s);
    }

    if(ff_init_me(s)<0)
        return -1;

    /* Estimate motion for every MB (non-I), or mark everything intra and
     * optionally measure MB variance for adaptive quant (I pictures). */
    if(s->pict_type != AV_PICTURE_TYPE_I){
        s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
        s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
        if(s->pict_type != AV_PICTURE_TYPE_B && s->avctx->me_threshold==0){
            if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
                s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
            }
        }

        s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
    }else {
        /* I-Frame */
        for(i=0; i<s->mb_stride*s->mb_height; i++)
            s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;

        if(!s->fixed_qscale){
            /* finding spatial complexity for I-frame rate control */
            s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
        }
    }
    for(i=1; i<context_count; i++){
        merge_context_after_me(s, s->thread_context[i]);
    }
    s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
    s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
    emms_c();

    /* scene change detected during ME: force an intra picture */
    if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
        s->pict_type= AV_PICTURE_TYPE_I;
        for(i=0; i<s->mb_stride*s->mb_height; i++)
            s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;

        if(s->msmpeg4_version >= 3) s->no_rounding=1;
    }

    if(!s->umvplus){
        /* choose fcodes from the estimated vectors, then clip overly long
         * MVs to what those fcodes can represent */
        if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
            s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);

            if(s->flags & CODEC_FLAG_INTERLACED_ME){
                int a,b;
                a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I);
                b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
                s->f_code= FFMAX3(s->f_code, a, b);
            }

            ff_fix_long_p_mvs(s);
            ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
            if(s->flags & CODEC_FLAG_INTERLACED_ME){
                int j;
                for(i=0; i<2; i++){
                    for(j=0; j<2; j++)
                        ff_fix_long_mvs(s, s->p_field_select_table[i], j,
                                        s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
                }
            }
        }

        if(s->pict_type==AV_PICTURE_TYPE_B){
            int a, b;

            /* forward fcode covers both forward and bidir-forward tables */
            a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
            b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
            s->f_code = FFMAX(a, b);

            a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
            b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
            s->b_code = FFMAX(a, b);

            ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
            ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
            ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
            ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
            if(s->flags & CODEC_FLAG_INTERLACED_ME){
                int dir, j;
                for(dir=0; dir<2; dir++){
                    for(i=0; i<2; i++){
                        for(j=0; j<2; j++){
                            int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
                                          : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
                            ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
                                            s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
                        }
                    }
                }
            }
        }
    }

    if (estimate_qp(s, 0) < 0)
        return -1;

    if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
        s->qscale= 3; //reduce clipping problems

    if (s->out_format == FMT_MJPEG) {
        /* for mjpeg, we do include qscale in the matrix */
        for(i=1;i<64;i++){
            int j= s->dsp.idct_permutation[i];

            s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
        }
        s->y_dc_scale_table=
        s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
        s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
        ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
                       s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
        s->qscale= 8;
    }
    if(s->codec_id == AV_CODEC_ID_AMV){
        /* AMV uses fixed sp5x quantization tables and DC scales */
        static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
        static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
        for(i=1;i<64;i++){
            int j= s->dsp.idct_permutation[ff_zigzag_direct[i]];

            s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
            s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
        }
        s->y_dc_scale_table= y;
        s->c_dc_scale_table= c;
        s->intra_matrix[0] = 13;
        s->chroma_intra_matrix[0] = 14;
        ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
                       s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
        ff_convert_matrix(&s->dsp, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
                       s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
        s->qscale= 8;
    }

    //FIXME var duplication
    s->current_picture_ptr->f.key_frame =
    s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
    s->current_picture_ptr->f.pict_type =
    s->current_picture.f.pict_type = s->pict_type;

    if (s->current_picture.f.key_frame)
        s->picture_in_gop_number=0;

    s->mb_x = s->mb_y = 0;
    s->last_bits= put_bits_count(&s->pb);
    /* write the codec-specific picture header */
    switch(s->out_format) {
    case FMT_MJPEG:
        if (CONFIG_MJPEG_ENCODER)
            ff_mjpeg_encode_picture_header(s);
        break;
    case FMT_H261:
        if (CONFIG_H261_ENCODER)
            ff_h261_encode_picture_header(s, picture_number);
        break;
    case FMT_H263:
        if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
            ff_wmv2_encode_picture_header(s, picture_number);
        else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
            ff_msmpeg4_encode_picture_header(s, picture_number);
        else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
            ff_mpeg4_encode_picture_header(s, picture_number);
        else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
            ff_rv10_encode_picture_header(s, picture_number);
        else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
            ff_rv20_encode_picture_header(s, picture_number);
        else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
            ff_flv_encode_picture_header(s, picture_number);
        else if (CONFIG_H263_ENCODER)
            ff_h263_encode_picture_header(s, picture_number);
        break;
    case FMT_MPEG1:
        if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
            ff_mpeg1_encode_picture_header(s, picture_number);
        break;
    case FMT_H264:
        break;
    default:
        av_assert0(0);
    }
    bits= put_bits_count(&s->pb);
    s->header_bits= bits - s->last_bits;

    for(i=1; i<context_count; i++){
        update_duplicate_context_after_me(s->thread_context[i], s);
    }
    s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
    for(i=1; i<context_count; i++){
        merge_context_after_encode(s, s->thread_context[i]);
    }
    emms_c();
    return 0;
}
03431
03432 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block){
03433 const int intra= s->mb_intra;
03434 int i;
03435
03436 s->dct_count[intra]++;
03437
03438 for(i=0; i<64; i++){
03439 int level= block[i];
03440
03441 if(level){
03442 if(level>0){
03443 s->dct_error_sum[intra][i] += level;
03444 level -= s->dct_offset[intra][i];
03445 if(level<0) level=0;
03446 }else{
03447 s->dct_error_sum[intra][i] -= level;
03448 level += s->dct_offset[intra][i];
03449 if(level>0) level=0;
03450 }
03451 block[i]= level;
03452 }
03453 }
03454 }
03455
/**
 * Rate-distortion optimal ("trellis") quantization of one 8x8 block.
 * Runs the forward DCT, then a dynamic program over scan positions that
 * weighs reconstruction distortion against VLC bit cost (lambda) to pick
 * the quantized levels, writing the result back into block in coded order.
 * @param n        block index (selects luma/chroma matrices and DC scale)
 * @param overflow set if any level exceeds s->max_qcoeff
 * @return index of the last nonzero coefficient, or -1/0 per start_i if none
 */
static int dct_quantize_trellis_c(MpegEncContext *s,
                        DCTELEM *block, int n,
                        int qscale, int *overflow){
    const int *qmat;
    const uint8_t *scantable= s->intra_scantable.scantable;
    const uint8_t *perm_scantable= s->intra_scantable.permutated;
    int max=0;
    unsigned int threshold1, threshold2;
    int bias=0;
    int run_tab[65];
    int level_tab[65];
    int score_tab[65];
    int survivor[65];
    int survivor_count;
    int last_run=0;
    int last_level=0;
    int last_score= 0;
    int last_i;
    int coeff[2][64];     // up to two candidate levels per scan position
    int coeff_count[64];
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const int esc_length= s->ac_esc_length;
    uint8_t * length;
    uint8_t * last_length;
    const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);

    s->dsp.fdct (block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);
    qmul= qscale*16;
    qadd= ((qscale-1)|1)*8;

    if (s->mb_intra) {
        int q;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
            q = q << 3;
        } else{
            /* For AIC we do quantize the intra DC with scale 8 and no
             * AC rounding offset. */
            q = 1 << 3;
            qadd=0;
        }

        /* DC is quantized separately with simple rounding; the trellis
         * only covers the AC coefficients. */
        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
        last_non_zero = 0;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
            bias= 1<<(QMAT_SHIFT-1);
        length = s->intra_ac_vlc_length;
        last_length= s->intra_ac_vlc_last_length;
    } else {
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        length = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_i= start_i;

    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);

    /* find the last coefficient that would survive plain quantization */
    for(i=63; i>=start_i; i--) {
        const int j = scantable[i];
        int level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }
    }

    /* quantize; for each position keep up to two candidate magnitudes
     * (nearest and one smaller) for the trellis to choose between */
    for(i=start_i; i<=last_non_zero; i++) {
        const int j = scantable[i];
        int level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                coeff[0][i]= level;
                coeff[1][i]= level-1;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                coeff[0][i]= -level;
                coeff[1][i]= -level+1;
            }
            coeff_count[i]= FFMIN(level, 2);
            av_assert2(coeff_count[i]);
            max |=level;
        }else{
            /* would quantize to zero: the only candidate is +-1 */
            coeff[0][i]= (level>>31)|1;
            coeff_count[i]= 1;
        }
    }

    *overflow= s->max_qcoeff < max;

    if(last_non_zero < start_i){
        memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
        return last_non_zero;
    }

    score_tab[start_i]= 0;
    survivor[0]= start_i;
    survivor_count= 1;

    /* dynamic program over scan positions: score_tab[i] is the best
     * rate-distortion cost of coding everything before position i,
     * survivor[] prunes the set of useful predecessors */
    for(i=start_i; i<=last_non_zero; i++){
        int level_index, j, zero_distortion;
        int dct_coeff= FFABS(block[ scantable[i] ]);
        int best_score=256*256*256*120;

        if (s->dsp.fdct == ff_fdct_ifast)
            dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
        zero_distortion= dct_coeff*dct_coeff;

        for(level_index=0; level_index < coeff_count[i]; level_index++){
            int distortion;
            int level= coeff[level_index][i];
            const int alevel= FFABS(level);
            int unquant_coeff;

            av_assert2(level);

            /* reconstruct the dequantized value to measure distortion */
            if(s->out_format == FMT_H263){
                unquant_coeff= alevel*qmul + qadd;
            }else{ // MPEG-1/2 style dequant with oddification
                j= s->dsp.idct_permutation[ scantable[i] ];
                if(s->mb_intra){
                    unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
                    unquant_coeff = (unquant_coeff - 1) | 1;
                }else{
                    unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
                    unquant_coeff = (unquant_coeff - 1) | 1;
                }
                unquant_coeff<<= 3;
            }

            distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
            level+=64;
            if((level&(~127)) == 0){
                /* level fits the run/level VLC tables (biased by 64) */
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                    score += score_tab[i-run];

                    if(score < best_score){
                        best_score= score;
                        run_tab[i+1]= run;
                        level_tab[i+1]= level-64;
                    }
                }

                if(s->out_format == FMT_H263){
                    /* also consider ending the block here ("last" VLC) */
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                        score += score_tab[i-run];
                        if(score < last_score){
                            last_score= score;
                            last_run= run;
                            last_level= level-64;
                            last_i= i+1;
                        }
                    }
                }
            }else{
                /* out of VLC range: must use the escape code */
                distortion += esc_length*lambda;
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + score_tab[i-run];

                    if(score < best_score){
                        best_score= score;
                        run_tab[i+1]= run;
                        level_tab[i+1]= level-64;
                    }
                }

                if(s->out_format == FMT_H263){
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + score_tab[i-run];
                        if(score < last_score){
                            last_score= score;
                            last_run= run;
                            last_level= level-64;
                            last_i= i+1;
                        }
                    }
                }
            }
        }

        score_tab[i+1]= best_score;

        /* prune survivors; for short blocks keep only strictly better
         * predecessors, otherwise allow a lambda slack */
        if(last_non_zero <= 27){
            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score)
                    break;
            }
        }else{
            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
                    break;
            }
        }

        survivor[ survivor_count++ ]= i+1;
    }

    if(s->out_format != FMT_H263){
        /* formats without a "last" VLC: pick the best truncation point,
         * charging ~2 bits (lambda*2) for a nonempty block end */
        last_score= 256*256*256*120;
        for(i= survivor[0]; i<=last_non_zero + 1; i++){
            int score= score_tab[i];
            if(i) score += lambda*2; //FIXME exacter?

            if(score < last_score){
                last_score= score;
                last_i= i;
                last_level= level_tab[i];
                last_run= run_tab[i];
            }
        }
    }

    s->coded_score[n] = last_score;

    dc= FFABS(block[0]);
    last_non_zero= last_i - 1;
    memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));

    if(last_non_zero < start_i)
        return last_non_zero;

    if(last_non_zero == 0 && start_i == 0){
        /* special case for the DC-only inter block: decide whether coding
         * the single coefficient beats dropping the block entirely */
        int best_level= 0;
        int best_score= dc * dc;

        for(i=0; i<coeff_count[0]; i++){
            int level= coeff[i][0];
            int alevel= FFABS(level);
            int unquant_coeff, score, distortion;

            if(s->out_format == FMT_H263){
                unquant_coeff= (alevel*qmul + qadd)>>3;
            }else{ //MPEG1
                unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
                unquant_coeff = (unquant_coeff - 1) | 1;
            }
            unquant_coeff = (unquant_coeff + 4) >> 3;
            unquant_coeff<<= 3 + 3;

            distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
            level+=64;
            if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
            else score= distortion + esc_length*lambda;

            if(score < best_score){
                best_score= score;
                best_level= level - 64;
            }
        }
        block[0]= best_level;
        s->coded_score[n] = best_score - dc*dc;
        if(best_level == 0) return -1;
        else return last_non_zero;
    }

    /* backtrack through run_tab/level_tab, writing levels into the block
     * in (permuted) coded order */
    i= last_i;
    av_assert2(last_level);

    block[ perm_scantable[last_non_zero] ]= last_level;
    i -= last_run + 1;

    for(; i>start_i; i -= run_tab[i] + 1){
        block[ perm_scantable[i-1] ]= level_tab[i];
    }

    return last_non_zero;
}
03747
03748
03749 static int16_t basis[64][64];
03750
03751 static void build_basis(uint8_t *perm){
03752 int i, j, x, y;
03753 emms_c();
03754 for(i=0; i<8; i++){
03755 for(j=0; j<8; j++){
03756 for(y=0; y<8; y++){
03757 for(x=0; x<8; x++){
03758 double s= 0.25*(1<<BASIS_SHIFT);
03759 int index= 8*i + j;
03760 int perm_index= perm[index];
03761 if(i==0) s*= sqrt(0.5);
03762 if(j==0) s*= sqrt(0.5);
03763 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
03764 }
03765 }
03766 }
03767 }
03768 }
03769
/**
 * Iteratively refine an already-quantized block (noise shaping): repeatedly
 * try +-1 changes on each coefficient, scoring the change as weighted
 * reconstruction error (via try_8x8basis on the residual rem[]) plus
 * lambda-weighted VLC bit-cost delta, and apply the best change until no
 * change improves the score.
 * @param weight per-coefficient perceptual weights; rescaled in place
 * @param orig   original (unquantized) pixel-domain reference for rem[]
 * @return index of the last nonzero coefficient after refinement
 */
static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
                        DCTELEM *block, int16_t *weight, DCTELEM *orig,
                        int n, int qscale){
    int16_t rem[64];
    LOCAL_ALIGNED_16(DCTELEM, d1, [64]);
    const uint8_t *scantable= s->intra_scantable.scantable;
    const uint8_t *perm_scantable= s->intra_scantable.permutated;
    int run_tab[65];
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    uint8_t * length;
    uint8_t * last_length;
    int lambda;
    int rle_index, run, q = 1, sum;
#ifdef REFINE_STATS
static int count=0;
static int after_last=0;
static int to_zero=0;
static int from_zero=0;
static int raise=0;
static int lower=0;
static int messed_sign=0;
#endif

    /* lazily build the DCT basis table on first use */
    if(basis[0][0] == 0)
        build_basis(s->dsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->mb_intra) {
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
        } else{
            /* For AIC we do quantize the intra DC at scale 1 with no offset. */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;

        /* DC reconstruction value; the DC coefficient itself is refined in
         * the start_i branch of the main loop below */
        dc= block[0]*q;

        start_i = 1;

        length = s->intra_ac_vlc_length;
        last_length= s->intra_ac_vlc_last_length;
    } else {
        dc= 0;
        start_i = 0;
        length = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->block_last_index[n];

#ifdef REFINE_STATS
{START_TIMER
#endif
    /* residual = DC reconstruction (plus rounding) minus original pixels */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i]= dc - (orig[i]<<RECON_SHIFT);
    }
#ifdef REFINE_STATS
STOP_TIMER("memset rem[]")}
#endif
    /* remap the perceptual weights to a bounded 1..63 range and derive the
     * effective lambda from their energy */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
#ifdef REFINE_STATS
{START_TIMER
#endif
    /* build the run-length table of the current quantized block and add the
     * dequantized coefficients into the residual */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->dsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }
#ifdef REFINE_STATS
if(last_non_zero>0){
STOP_TIMER("init rem[]")
}
}

{START_TIMER
#endif
    /* greedy search: in each pass evaluate every +-1 change and keep the
     * single best one; stop when no change improves the score */
    for(;;){
        int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
#ifdef REFINE_STATS
{START_TIMER
#endif
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            /* DCT of the weighted residual; used below to skip +-1 changes
             * that point against the error gradient */
#ifdef REFINE_STATS
{START_TIMER
#endif
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
#ifdef REFINE_STATS
STOP_TIMER("rem*w*w")}
{START_TIMER
#endif
            s->dsp.fdct(d1);
#ifdef REFINE_STATS
STOP_TIMER("dct")}
#endif
        }

        if(start_i){
            /* intra: try refining the DC coefficient (no bit-cost model,
             * only distortion) */
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++];
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* nonzero -> nonzero: bit-cost delta of the level change */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                       - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                       - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* zero -> +-1: a new coefficient appears, splitting a run */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score += length[UNI_AC_ENC_INDEX(run, 65)]
                                       + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                       - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score += length[UNI_AC_ENC_INDEX(run, 65)]
                                       + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                       - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            /* appended after the current last coefficient */
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                       - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* +-1 -> zero: the coefficient disappears, merging runs */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                   - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                   - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                   - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                   - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                   - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }
#ifdef REFINE_STATS
STOP_TIMER("iterative step")}
#endif

        if(best_change){
            /* apply the winning change, update last_non_zero, the run table
             * and the residual, then iterate again */
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
#ifdef REFINE_STATS
after_last++;
#endif
            }else{
#ifdef REFINE_STATS
if(block[j]){
if(block[j] - best_change){
if(FFABS(block[j]) > FFABS(block[j] - best_change)){
raise++;
}else{
lower++;
}
}else{
from_zero++;
}
}else{
to_zero++;
}
#endif
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }
#ifdef REFINE_STATS
count++;
if(256*256*256*64 % count == 0){
printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
}
#endif
            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;
        }
    }
#ifdef REFINE_STATS
if(last_non_zero>0){
STOP_TIMER("iterative search")
}
}
#endif

    return last_non_zero;
}
04142
/**
 * Standard (non-trellis) quantization of one 8x8 block: forward DCT,
 * optional denoising, then threshold-and-round each coefficient with the
 * precomputed reciprocal matrix qmat.
 * @param n        block index (selects luma/chroma matrices and DC scale)
 * @param overflow set if any level exceeds s->max_qcoeff
 * @return index of the last nonzero coefficient (in scan order)
 */
int ff_dct_quantize_c(MpegEncContext *s,
                        DCTELEM *block, int n,
                        int qscale, int *overflow)
{
    int i, j, level, last_non_zero, q, start_i;
    const int *qmat;
    const uint8_t *scantable= s->intra_scantable.scantable;
    int bias;
    int max=0;
    unsigned int threshold1, threshold2;

    s->dsp.fdct (block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);

    if (s->mb_intra) {
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
            q = q << 3;
        } else
            /* For AIC we do quantize the intra DC at scale 8. */
            q = 1 << 3;

        /* DC is quantized separately with simple rounding */
        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
        last_non_zero = 0;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
    } else {
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
    }
    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);
    /* scan backwards for the last nonzero coefficient, zeroing the tail;
     * the unsigned trick tests |level| > threshold1 in one compare */
    for(i=63;i>=start_i;i--) {
        j = scantable[i];
        level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }else{
            block[j]=0;
        }
    }
    /* quantize the remaining coefficients with sign-symmetric rounding */
    for(i=start_i; i<=last_non_zero; i++) {
        j = scantable[i];
        level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                block[j]= level;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                block[j]= -level;
            }
            max |=level;
        }else{
            block[j]=0;
        }
    }
    *overflow= s->max_qcoeff < max;

    /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
    if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
        ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);

    return last_non_zero;
}
04222
/* Shorthand for AVOption tables below: field offset into the private context,
 * and the flag set marking video-encoding options. */
#define OFFSET(x) offsetof(MpegEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the baseline H.263 encoder, appended to the common
 * mpegvideo option set. */
static const AVOption h263_options[] = {
    { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
    { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
    FF_MPV_COMMON_OPTS
    { NULL },
};
04232
/* AVClass exposing h263_options through the generic AVOption API. */
static const AVClass h263_class = {
    .class_name = "H.263 encoder",
    .item_name  = av_default_item_name,
    .option     = h263_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
04239
04240 AVCodec ff_h263_encoder = {
04241 .name = "h263",
04242 .type = AVMEDIA_TYPE_VIDEO,
04243 .id = AV_CODEC_ID_H263,
04244 .priv_data_size = sizeof(MpegEncContext),
04245 .init = ff_MPV_encode_init,
04246 .encode2 = ff_MPV_encode_picture,
04247 .close = ff_MPV_encode_end,
04248 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
04249 .long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
04250 .priv_class = &h263_class,
04251 };
04252
/* Private options of the H.263+ (H.263-1998) encoder, appended to the
 * common mpegvideo option set. */
static const AVOption h263p_options[] = {
    { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
    FF_MPV_COMMON_OPTS
    { NULL },
};
/* AVClass exposing h263p_options through the generic AVOption API. */
static const AVClass h263p_class = {
    .class_name = "H.263p encoder",
    .item_name  = av_default_item_name,
    .option     = h263p_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
04267
/* H.263+ (H.263-1998, H.263 version 2) encoder registration.
 * Unlike baseline H.263 above, this one advertises slice-threading support. */
AVCodec ff_h263p_encoder = {
    .name           = "h263p",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H263P,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_MPV_encode_init,
    .encode2        = ff_MPV_encode_picture,
    .close          = ff_MPV_encode_end,
    .capabilities   = CODEC_CAP_SLICE_THREADS,
    .pix_fmts       = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
    .priv_class     = &h263p_class,
};
04281
/* Generic AVClass (common mpegvideo options only) for msmpeg4v2. */
FF_MPV_GENERIC_CLASS(msmpeg4v2)

/* MS-MPEG4 version 2 encoder registration; shares the common
 * mpegvideo encode entry points. */
AVCodec ff_msmpeg4v2_encoder = {
    .name           = "msmpeg4v2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MSMPEG4V2,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_MPV_encode_init,
    .encode2        = ff_MPV_encode_picture,
    .close          = ff_MPV_encode_end,
    .pix_fmts       = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
    .priv_class     = &msmpeg4v2_class,
};
04296
/* Generic AVClass (common mpegvideo options only) for msmpeg4v3. */
FF_MPV_GENERIC_CLASS(msmpeg4v3)

/* MS-MPEG4 version 3 encoder registration; note the public codec name
 * is plain "msmpeg4". */
AVCodec ff_msmpeg4v3_encoder = {
    .name           = "msmpeg4",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MSMPEG4V3,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_MPV_encode_init,
    .encode2        = ff_MPV_encode_picture,
    .close          = ff_MPV_encode_end,
    .pix_fmts       = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
    .priv_class     = &msmpeg4v3_class,
};
04311
/* Generic AVClass (common mpegvideo options only) for wmv1. */
FF_MPV_GENERIC_CLASS(wmv1)

/* Windows Media Video 7 (WMV1) encoder registration; shares the common
 * mpegvideo encode entry points. */
AVCodec ff_wmv1_encoder = {
    .name           = "wmv1",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_WMV1,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_MPV_encode_init,
    .encode2        = ff_MPV_encode_picture,
    .close          = ff_MPV_encode_end,
    .pix_fmts       = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
    .priv_class     = &wmv1_class,
};