64 #define ME_CACHE_SIZE 1024
72 uint8_t *const ref[3], uint8_t *const ref2[3],
73 int x, int y, int ref_index)
78 ((y*c->uvstride + x) >> s->chroma_h_shift),
79 ((y*c->uvstride + x) >> s->chroma_h_shift),
81 for (int i = 0; i < 3; i++) {
93 const int el = FFMIN(e, 10);
98 for (i = 0; i < el; i++)
104 for (i = e - 1; i >= el; i--)
131 for (int i = log2 - 1; i >= 0; i--)
162 int plane_index, ret;
172 s->spatial_decomposition_type = enc->pred;
177 for(plane_index=0; plane_index<3; plane_index++){
178 s->plane[plane_index].diag_mc= 1;
179 s->plane[plane_index].htaps= 6;
180 s->plane[plane_index].hcoeff[0]= 40;
181 s->plane[plane_index].hcoeff[1]= -10;
182 s->plane[plane_index].hcoeff[2]= 2;
183 s->plane[plane_index].fast_mc= 1;
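/*
 * Illustrative sketch (not part of snowenc.c): the defaults above, htaps=6
 * with hcoeff={40,-10,2}, describe the mirrored half-pel interpolation
 * filter {2,-10,40,40,-10,2}.  Its taps sum to 64, so a plausible
 * normalization is a rounded shift by 6; the exact rounding and clipping
 * of the real MC code are assumptions here, as is the helper name.
 */
#include <stdint.h>

static uint8_t sketch_halfpel_6tap(const uint8_t *p /* p[0], p[1] are the two center pixels */)
{
    static const int taps[6] = { 2, -10, 40, 40, -10, 2 };
    int sum = 0;
    for (int i = 0; i < 6; i++)
        sum += taps[i] * p[i - 2];      /* taps mirrored around the half-pel position */
    sum = (sum + 32) >> 6;              /* assumed rounding and >>6 normalization */
    return sum < 0 ? 0 : sum > 255 ? 255 : (uint8_t)sum;
}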
193 enc->qdsp.put_qpel_pixels_tab [0][dy+dx/4]=\
194 enc->qdsp.put_no_rnd_qpel_pixels_tab[0][dy+dx/4]=\
195 s->h264qpel.put_h264_qpel_pixels_tab[0][dy+dx/4];\
196 enc->qdsp.put_qpel_pixels_tab [1][dy+dx/4]=\
197 enc->qdsp.put_no_rnd_qpel_pixels_tab[1][dy+dx/4]=\
198 s->h264qpel.put_h264_qpel_pixels_tab[1][dy+dx/4];
263 s->colorspace_type= 0;
267 s->colorspace_type = 1;
285 if (!s->input_picture)
292 int size= s->b_width * s->b_height << 2*s->block_max_depth;
293 for(i=0; i<s->max_ref_frames; i++){
296 if (!s->ref_mvs[i] || !s->ref_scores[i])
305 static int pix_sum(const uint8_t * pix, int line_size, int w, int h)
310 for (i = 0; i < h; i++) {
311 for (j = 0; j < w; j++) {
315 pix += line_size - w;
321 static int pix_norm1(const uint8_t * pix, int line_size, int w)
327 for (i = 0; i < w; i++) {
328 for (j = 0; j < w; j++) {
332 pix += line_size - w;
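/*
 * A minimal, self-contained sketch of what the two helpers above compute:
 * pix_sum() returns the sum of all pixels of a w x h block, pix_norm1()
 * the sum of squared pixels of a w x w block (the real helper uses a
 * square lookup table rather than a multiply).  Note how both advance
 * pix by "line_size - w" at the end of each row, as in the fragments.
 */
#include <stdint.h>

static int sketch_pix_sum(const uint8_t *pix, int line_size, int w, int h)
{
    int s = 0;
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < w; j++)
            s += pix[j];
        pix += line_size;               /* same as pix += w; pix += line_size - w; */
    }
    return s;
}

static int sketch_pix_norm1(const uint8_t *pix, int line_size, int w)
{
    int s = 0;
    for (int i = 0; i < w; i++) {
        for (int j = 0; j < w; j++)
            s += pix[j] * pix[j];
        pix += line_size;
    }
    return s;
}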
364 #define P_TOPRIGHT P[3]
365 #define P_MEDIAN P[4]
367 #define FLAG_QPEL 1 //must be 1
373 uint8_t p_buffer[1024];
374 uint8_t i_buffer[1024];
375 uint8_t p_state[sizeof(s->block_state)];
376 uint8_t i_state[sizeof(s->block_state)];
378 uint8_t *pbbak= s->c.bytestream;
379 uint8_t *pbbak_start= s->c.bytestream_start;
380 int score, score2, iscore, i_len, p_len, block_s, sum, base_bits;
381 const int w= s->b_width << s->block_max_depth;
382 const int h= s->b_height << s->block_max_depth;
383 const int rem_depth= s->block_max_depth - level;
384 const int index= (x + y*w) << rem_depth;
386 int trx= (x+1)<<rem_depth;
387 int try= (y+1)<<rem_depth;
394 int pl = left->color[0];
395 int pcb= left->color[1];
396 int pcr= left->color[2];
400 const int stride= s->current_picture->linesize[0];
401 const int uvstride= s->current_picture->linesize[1];
402 const uint8_t *const current_data[3] = { s->input_picture->data[0] + (x + y*stride)*block_w,
403 s->input_picture->data[1] + ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift),
404 s->input_picture->data[2] + ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift)};
406 int16_t last_mv[3][2];
408 const int shift= 1+qpel;
413 int ref, best_ref, ref_score, ref_mx, ref_my;
417 set_blocks(s, level, x, y, pl, pcb, pcr, 0, 0, 0, BLOCK_INTRA);
430 last_mv[0][0]= s->block[index].mx;
431 last_mv[0][1]= s->block[index].my;
432 last_mv[1][0]= right->mx;
433 last_mv[1][1]= right->my;
434 last_mv[2][0]= bottom->mx;
435 last_mv[2][1]= bottom->my;
450 c->xmin = - x*block_w - 16+3;
451 c->ymin = - y*block_w - 16+3;
452 c->xmax = - (x+1)*block_w + (w<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3;
453 c->ymax = - (y+1)*block_w + (h<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3;
477 init_ref(c, current_data, s->last_picture[ref]->data, NULL, block_w*x, block_w*y, 0);
487 ref_score= c->sub_motion_search(&enc->m, &ref_mx, &ref_my, ref_score, 0, 0, level-LOG2_MB_SIZE+4, block_w);
495 if(score > ref_score){
505 base_bits= get_rac_count(&s->c) - 8*(s->c.bytestream - s->c.bytestream_start);
508 pc.bytestream= p_buffer;
509 memcpy(p_state, s->block_state, sizeof(s->block_state));
511 if(level!=s->block_max_depth)
512 put_rac(&pc, &p_state[4 + s_context], 1);
514 if(s->ref_frames > 1)
515 put_symbol(&pc, &p_state[128 + 1024 + 32*ref_context], best_ref, 0);
517 put_symbol(&pc, &p_state[128 + 32*(mx_context + 16*!!best_ref)], mx - pmx, 1);
518 put_symbol(&pc, &p_state[128 + 32*(my_context + 16*!!best_ref)], my - pmy, 1);
519 p_len= pc.bytestream - pc.bytestream_start;
522 block_s= block_w*block_w;
524 l= (sum + block_s/2)/block_s;
525 iscore = pix_norm1(current_data[0], stride, block_w) - 2*l*sum + l*l*block_s;
527 if (s->nb_planes > 2) {
528 block_s= block_w*block_w>>(s->chroma_h_shift + s->chroma_v_shift);
529 sum = pix_sum(current_data[1], uvstride, block_w>>s->chroma_h_shift, block_w>>s->chroma_v_shift);
530 cb= (sum + block_s/2)/block_s;
532 sum = pix_sum(current_data[2], uvstride, block_w>>s->chroma_h_shift, block_w>>s->chroma_v_shift);
533 cr= (sum + block_s/2)/block_s;
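/*
 * Sketch of the identity behind the intra score above: with l the rounded
 * block mean, sum = sum(p) and block_s the number of pixels,
 *     sum((p - l)^2) = sum(p^2) - 2*l*sum(p) + l^2*block_s,
 * so "pix_norm1() - 2*l*sum + l*l*block_s" is the SSD of the block against
 * its own DC value, i.e. the distortion of coding it as a flat intra block.
 * The helper below (illustrative only) evaluates that expression directly.
 */
static int sketch_flat_intra_ssd(const uint8_t *pix, int stride, int w)
{
    int sum = 0, norm1 = 0;
    for (int y = 0; y < w; y++)
        for (int x = 0; x < w; x++) {
            sum   += pix[x + y*stride];
            norm1 += pix[x + y*stride] * pix[x + y*stride];
        }
    const int block_s = w * w;
    const int l = (sum + block_s/2) / block_s;   /* rounded DC, as in the code above */
    return norm1 - 2*l*sum + l*l*block_s;        /* == sum((p - l)^2) */
}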
540 ic.bytestream= i_buffer;
541 memcpy(i_state, s->block_state, sizeof(s->block_state));
542 if(level!=s->block_max_depth)
543 put_rac(&ic, &i_state[4 + s_context], 1);
546 if (s->nb_planes > 2) {
550 i_len= ic.bytestream - ic.bytestream_start;
553 av_assert1(iscore < 255*255*256 + enc->lambda2*10);
559 int varc= iscore >> 8;
560 int vard= score >> 8;
561 if (vard <= 64 || vard < varc)
564 c->scene_change_score += enc->m.qscale;
567 if(level!=s->block_max_depth){
568 put_rac(&s->c, &s->block_state[4 + s_context], 0);
575 if(score2 < score && score2 < iscore)
581 memcpy(pbbak, i_buffer, i_len);
583 s->c.bytestream_start= pbbak_start;
584 s->c.bytestream= pbbak + i_len;
585 set_blocks(s, level, x, y, l, cb, cr, pmx, pmy, 0, BLOCK_INTRA);
586 memcpy(s->block_state, i_state, sizeof(s->block_state));
589 memcpy(pbbak, p_buffer, p_len);
591 s->c.bytestream_start= pbbak_start;
592 s->c.bytestream= pbbak + p_len;
593 set_blocks(s, level, x, y, pl, pcb, pcr, mx, my, best_ref, 0);
594 memcpy(s->block_state, p_state, sizeof(s->block_state));
600 const int w= s->b_width << s->block_max_depth;
601 const int rem_depth= s->block_max_depth - level;
602 const int index= (x + y*w) << rem_depth;
603 int trx= (x+1)<<rem_depth;
609 int pl = left->color[0];
610 int pcb= left->color[1];
611 int pcr= left->color[2];
619 set_blocks(s, level, x, y, pl, pcb, pcr, 0, 0, 0, BLOCK_INTRA);
623 if(level!=s->block_max_depth){
625 put_rac(&s->c, &s->block_state[4 + s_context], 1);
627 put_rac(&s->c, &s->block_state[4 + s_context], 0);
639 if (s->nb_planes > 2) {
643 set_blocks(s, level, x, y, b->color[0], b->color[1], b->color[2], pmx, pmy, 0, BLOCK_INTRA);
647 if(s->ref_frames > 1)
648 put_symbol(&s->c, &s->block_state[128 + 1024 + 32*ref_context], b->ref, 0);
649 put_symbol(&s->c, &s->block_state[128 + 32*mx_context], b->mx - pmx, 1);
650 put_symbol(&s->c, &s->block_state[128 + 32*my_context], b->my - pmy, 1);
651 set_blocks(s, level, x, y, pl, pcb, pcr, b->mx, b->my, b->ref, 0);
659 Plane *p= &s->plane[plane_index];
660 const int block_size = MB_SIZE >> s->block_max_depth;
661 const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
662 const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
664 const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
665 const int ref_stride= s->current_picture->linesize[plane_index];
666 const uint8_t *src = s->input_picture->data[plane_index];
668 const int b_stride = s->b_width << s->block_max_depth;
671 int index= mb_x + mb_y*b_stride;
680 b->color[plane_index]= 0;
681 memset(dst, 0, obmc_stride*obmc_stride*sizeof(IDWTELEM));
684 int mb_x2= mb_x + (i&1) - 1;
685 int mb_y2= mb_y + (i>>1) - 1;
686 int x= block_w*mb_x2 + block_w/2;
687 int y= block_h*mb_y2 + block_h/2;
690 x, y, block_w, block_h, w, h, obmc_stride, ref_stride, obmc_stride, mb_x2, mb_y2, 0, 0, plane_index);
692 for(y2= FFMAX(y, 0); y2<FFMIN(h, y+block_h); y2++){
693 for(x2= FFMAX(x, 0); x2<FFMIN(w, x+block_w); x2++){
694 int index= x2-(block_w*mb_x - block_w/2) + (y2-(block_h*mb_y - block_h/2))*obmc_stride;
695 int obmc_v= obmc[index];
697 if(y<0) obmc_v += obmc[index + block_h*obmc_stride];
698 if(x<0) obmc_v += obmc[index + block_w];
699 if(y+block_h>h) obmc_v += obmc[index - block_h*obmc_stride];
700 if(x+block_w>w) obmc_v += obmc[index - block_w];
706 aa += obmc_v * obmc_v;
716 const int b_stride = s->b_width << s->block_max_depth;
717 const int b_height = s->b_height<<s->block_max_depth;
718 int index= x + y*b_stride;
728 if(x<0 || x>=b_stride || y>=b_height)
754 int plane_index, uint8_t (*obmc_edged)[MB_SIZE * 2])
757 Plane *p= &s->plane[plane_index];
758 const int block_size = MB_SIZE >> s->block_max_depth;
759 const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
760 const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
761 const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
762 const int ref_stride= s->current_picture->linesize[plane_index];
763 uint8_t *dst= s->current_picture->data[plane_index];
764 const uint8_t *src = s->input_picture->data[plane_index];
766 uint8_t *cur = s->scratchbuf;
767 uint8_t *tmp = s->emu_edge_buffer;
768 const int b_stride = s->b_width << s->block_max_depth;
769 const int b_height = s->b_height<<s->block_max_depth;
775 int sx= block_w*mb_x - block_w/2;
776 int sy= block_h*mb_y - block_h/2;
777 int x0= FFMAX(0,-sx);
778 int y0= FFMAX(0,-sy);
779 int x1= FFMIN(block_w*2, w-sx);
780 int y1= FFMIN(block_h*2, h-sy);
785 ff_snow_pred_block(s, cur, tmp, ref_stride, sx, sy, block_w*2, block_h*2, &s->block[mb_x + mb_y*b_stride], plane_index, w, h);
787 for(y=y0; y<y1; y++){
788 const uint8_t *obmc1= obmc_edged[y];
790 uint8_t *cur1 = cur + y*ref_stride;
791 uint8_t *dst1 = dst + sx + (sy+y)*ref_stride;
792 for(x=x0; x<x1; x++){
793 #if FRAC_BITS >= LOG2_OBMC_MAX
799 if(v&(~255)) v= ~(v>>31);
806 && (mb_x == 0 || mb_x == b_stride-1)
807 && (mb_y == 0 || mb_y == b_height-1)){
817 memcpy(dst + sx+x0 + (sy+y)*ref_stride, cur + x0 + y*ref_stride, x1-x0);
828 distortion = ff_w97_32_c(&enc->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
830 distortion = ff_w53_32_c(&enc->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
834 int off = sx+16*(i&1) + (sy+16*(i>>1))*ref_stride;
835 distortion += enc->mecc.me_cmp[0](&enc->m, src + off, dst + off, ref_stride, 16);
840 distortion = enc->mecc.me_cmp[0](&enc->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, block_w*2);
851 if(mb_x == b_stride-2)
854 return distortion + rate*penalty_factor;
861 Plane *p= &s->plane[plane_index];
862 const int block_size = MB_SIZE >> s->block_max_depth;
863 const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
864 const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
866 const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
867 const int ref_stride= s->current_picture->linesize[plane_index];
868 uint8_t *dst= s->current_picture->data[plane_index];
869 const uint8_t *src = s->input_picture->data[plane_index];
873 const int b_stride = s->b_width << s->block_max_depth;
883 int mb_x2= mb_x + (i%3) - 1;
884 int mb_y2= mb_y + (i/3) - 1;
885 int x= block_w*mb_x2 + block_w/2;
886 int y= block_h*mb_y2 + block_h/2;
889 x, y, block_w, block_h, w, h, 0, ref_stride, obmc_stride, mb_x2, mb_y2, 1, 1, plane_index);
892 for(y2= y; y2<0; y2++)
893 memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, block_w);
894 for(y2= h; y2<y+block_h; y2++)
895 memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, block_w);
897 for(y2= y; y2<y+block_h; y2++)
898 memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, -x);
901 for(y2= y; y2<y+block_h; y2++)
902 memcpy(dst + w + y2*ref_stride, src + w + y2*ref_stride, x+block_w - w);
906 distortion += enc->mecc.me_cmp[block_w==8](&enc->m, src + x + y*ref_stride, dst + x + y*ref_stride, ref_stride, block_h);
920 for(i=merged?4:0; i<9; i++){
921 static const int dxy[9][2] = {{0,0},{1,0},{0,1},{1,1},{2,0},{2,1},{-1,2},{0,2},{1,2}};
925 return distortion + rate*penalty_factor;
929 const int w= b->width;
930 const int h= b->height;
935 int *runs = s->run_buffer;
942 int l=0, lt=0, t=0, rt=0;
964 if(px<b->parent->width && py<b->parent->height)
965 p= parent[px + py*2*stride];
969 runs[run_index++]= run;
977 max_index= run_index;
978 runs[run_index++]= run;
980 run= runs[run_index++];
983 if(run_index <= max_index)
987 if(s->c.bytestream_end - s->c.bytestream < w*40){
993 int l=0, lt=0, t=0, rt=0;
1015 if(px<b->parent->width && py<b->parent->height)
1016 p= parent[px + py*2*stride];
1024 run= runs[run_index++];
1026 if(run_index <= max_index)
1036 int l2= 2*FFABS(l) + (l<0);
1056 uint8_t (*obmc_edged)[MB_SIZE * 2], int *best_rd)
1059 const int b_stride= s->b_width << s->block_max_depth;
1067 block->color[0] = p[0];
1068 block->color[1] = p[1];
1069 block->color[2] = p[2];
1087 int mb_x, int mb_y, int p0, int p1,
1088 uint8_t (*obmc_edged)[MB_SIZE * 2], int *best_rd)
1091 const int b_stride = s->b_width << s->block_max_depth;
1113 if (rd < *best_rd) {
1123 int p0, int p1, int ref, int *best_rd)
1126 const int b_stride= s->b_width << s->block_max_depth;
1135 backup[0] = block[0];
1136 backup[1] = block[1];
1137 backup[2] = block[b_stride];
1138 backup[3] = block[b_stride + 1];
1163 block[0]= backup[0];
1164 block[1]= backup[1];
1165 block[b_stride]= backup[2];
1166 block[b_stride+1]= backup[3];
1174 int pass, mb_x, mb_y;
1175 const int b_width = s->b_width << s->block_max_depth;
1176 const int b_height= s->b_height << s->block_max_depth;
1177 const int b_stride= b_width;
1182 uint8_t state[sizeof(s->block_state)];
1183 memcpy(state, s->block_state, sizeof(s->block_state));
1184 for(mb_y= 0; mb_y<s->b_height; mb_y++)
1185 for(mb_x= 0; mb_x<s->b_width; mb_x++)
1188 memcpy(s->block_state, state, sizeof(s->block_state));
1191 for(pass=0; pass<25; pass++){
1194 for(mb_y= 0; mb_y<b_height; mb_y++){
1195 for(mb_x= 0; mb_x<b_width; mb_x++){
1196 int dia_change, i, j, ref;
1197 int best_rd= INT_MAX, ref_rd;
1199 const int index= mb_x + mb_y * b_stride;
1209 const int b_w= (MB_SIZE >> s->block_max_depth);
1225 for (y = 0; y < b_w * 2; y++)
1226 memcpy(obmc_edged[y], ff_obmc_tab[s->block_max_depth] + y * b_w * 2, b_w * 2);
1228 for(y=0; y<b_w*2; y++)
1229 memset(obmc_edged[y], obmc_edged[y][0] + obmc_edged[y][b_w-1], b_w);
1230 if(mb_x==b_stride-1)
1231 for(y=0; y<b_w*2; y++)
1232 memset(obmc_edged[y]+b_w, obmc_edged[y][b_w] + obmc_edged[y][b_w*2-1], b_w);
1234 for(x=0; x<b_w*2; x++)
1235 obmc_edged[0][x] += obmc_edged[b_w-1][x];
1236 for(y=1; y<b_w; y++)
1237 memcpy(obmc_edged[y], obmc_edged[0], b_w*2);
1239 if(mb_y==b_height-1){
1240 for(x=0; x<b_w*2; x++)
1241 obmc_edged[b_w*2-1][x] += obmc_edged[b_w][x];
1242 for(y=b_w; y<b_w*2-1; y++)
1243 memcpy(obmc_edged[y], obmc_edged[b_w*2-1], b_w*2);
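/*
 * Sketch of the border folding done above (top-edge case only, hypothetical
 * helper): at a picture edge the off-picture half of the 2*b_w OBMC window
 * has no neighbouring block to blend with, so its weights are folded onto
 * the first visible row and then replicated, keeping the total weight per
 * pixel unchanged.  Window size and fold direction follow the loops above;
 * the fixed maximum block size is an assumption of this sketch.
 */
#include <stdint.h>

#define SKETCH_MB_SIZE 16   /* assumed upper bound on b_w for the sketch */

static void sketch_fold_top_edge(uint8_t obmc_edged[][2*SKETCH_MB_SIZE], int b_w)
{
    for (int x = 0; x < b_w*2; x++)
        obmc_edged[0][x] += obmc_edged[b_w-1][x];   /* fold the off-picture half onto row 0 */
    for (int y = 1; y < b_w; y++)
        for (int x = 0; x < b_w*2; x++)
            obmc_edged[y][x] = obmc_edged[0][x];    /* replicate down to the block boundary */
}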
1248 if(mb_x==0 || mb_y==0 || mb_x==b_width-1 || mb_y==b_height-1){
1249 const uint8_t *src = s->input_picture->data[0];
1250 uint8_t *dst= s->current_picture->data[0];
1251 const int stride= s->current_picture->linesize[0];
1252 const int block_w= MB_SIZE >> s->block_max_depth;
1253 const int block_h= MB_SIZE >> s->block_max_depth;
1254 const int sx= block_w*mb_x - block_w/2;
1255 const int sy= block_h*mb_y - block_h/2;
1256 const int w= s->plane[0].width;
1257 const int h= s->plane[0].height;
1262 for(y= h; y<sy+block_h*2; y++)
1265 for(y=sy; y<sy+block_h*2; y++)
1268 if(sx+block_w*2 > w){
1269 for(y=sy; y<sy+block_h*2; y++)
1275 for(i=0; i < s->nb_planes; i++)
1288 int16_t (*mvr)[2]= &s->ref_mvs[ref][index];
1297 check_block_inter(enc, mb_x, mb_y, mvr[-b_stride][0], mvr[-b_stride][1], obmc_edged, &best_rd);
1299 check_block_inter(enc, mb_x, mb_y, mvr[-1][0], mvr[-1][1], obmc_edged, &best_rd);
1303 check_block_inter(enc, mb_x, mb_y, mvr[b_stride][0], mvr[b_stride][1], obmc_edged, &best_rd);
1308 int newx = block->mx;
1309 int newy = block->my;
1312 for(i=0; i < dia_size; i++){
1314 dia_change |= check_block_inter(enc, mb_x, mb_y, newx+4*(i-j), newy+(4*j), obmc_edged, &best_rd);
1315 dia_change |= check_block_inter(enc, mb_x, mb_y, newx-4*(i-j), newy-(4*j), obmc_edged, &best_rd);
1316 dia_change |= check_block_inter(enc, mb_x, mb_y, newx-(4*j), newy+4*(i-j), obmc_edged, &best_rd);
1317 dia_change |= check_block_inter(enc, mb_x, mb_y, newx+(4*j), newy-4*(i-j), obmc_edged, &best_rd);
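/*
 * Sketch of the pattern the four calls above walk: for a given radius i
 * (with j running from 0 to i-1 in the inner loop, which the listing
 * omits), the candidates (+-4*(i-j), +-4*j) and (-+4*j, +-4*(i-j)) trace
 * one ring of a diamond of radius 4*i (quarter-pel units) around
 * (newx, newy).  check_ring() below only prints the candidates; it is
 * illustrative and not part of the encoder.
 */
#include <stdio.h>

static void check_ring(int newx, int newy, int i)
{
    for (int j = 0; j < i; j++) {
        printf("(%d,%d) ",  newx + 4*(i-j), newy + 4*j);
        printf("(%d,%d) ",  newx - 4*(i-j), newy - 4*j);
        printf("(%d,%d) ",  newx - 4*j,     newy + 4*(i-j));
        printf("(%d,%d)\n", newx + 4*j,     newy - 4*(i-j));
    }
}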
1323 static const int square[8][2]= {{+1, 0},{-1, 0},{ 0,+1},{ 0,-1},{+1,+1},{-1,-1},{+1,-1},{-1,+1},};
1330 mvr[0][0]= block->mx;
1331 mvr[0][1]= block->my;
1332 if(ref_rd > best_rd){
1359 if(s->block_max_depth == 1){
1361 for(mb_y= 0; mb_y<b_height; mb_y+=2){
1362 for(mb_x= 0; mb_x<b_width; mb_x+=2){
1364 int best_rd, init_rd;
1365 const int index= mb_x + mb_y * b_stride;
1370 b[2]= b[0]+b_stride;
1385 (b[0]->mx + b[1]->mx + b[2]->mx + b[3]->mx + 2) >> 2,
1386 (b[0]->my + b[1]->my + b[2]->my + b[3]->my + 2) >> 2, 0, &best_rd);
1392 if(init_rd != best_rd)
1425 const int w= b->width;
1426 const int h= b->height;
1429 int x,y, thres1, thres2;
1447 if((unsigned)(i+thres1) > thres2){
1467 if((unsigned)(i+thres1) > thres2){
1486 const int w= b->width;
1487 const int h= b->height;
1508 const int w= b->width;
1509 const int h= b->height;
1512 for(y= h-1; y>=0; y--){
1513 for(x= w-1; x>=0; x--){
1532 const int w= b->width;
1533 const int h= b->height;
1556 int plane_index, level, orientation;
1558 for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
1560 for(orientation= level ? 1:0; orientation<4; orientation++){
1561 if(orientation==2) continue;
1562 put_symbol(&s->c, s->header_state, s->plane[plane_index].band[level][orientation].qlog, 1);
1572 memset(kstate, MID_STATE, sizeof(kstate));
1575 if(s->keyframe || s->always_reset){
1577 s->last_spatial_decomposition_type=
1581 s->last_block_max_depth= 0;
1582 for(plane_index=0; plane_index<2; plane_index++){
1583 Plane *p= &s->plane[plane_index];
1591 put_rac(&s->c, s->header_state, s->always_reset);
1592 put_symbol(&s->c, s->header_state, s->temporal_decomposition_type, 0);
1593 put_symbol(&s->c, s->header_state, s->temporal_decomposition_count, 0);
1594 put_symbol(&s->c, s->header_state, s->spatial_decomposition_count, 0);
1596 if (s->nb_planes > 2) {
1600 put_rac(&s->c, s->header_state, s->spatial_scalability);
1609 for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
1610 Plane *p= &s->plane[plane_index];
1615 put_rac(&s->c, s->header_state, update_mc);
1617 for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
1618 Plane *p= &s->plane[plane_index];
1625 if(s->last_spatial_decomposition_count != s->spatial_decomposition_count){
1627 put_symbol(&s->c, s->header_state, s->spatial_decomposition_count, 0);
1633 put_symbol(&s->c, s->header_state, s->spatial_decomposition_type - s->last_spatial_decomposition_type, 1);
1635 put_symbol(&s->c, s->header_state, s->mv_scale - s->last_mv_scale, 1);
1636 put_symbol(&s->c, s->header_state, s->qbias - s->last_qbias, 1);
1637 put_symbol(&s->c, s->header_state, s->block_max_depth - s->last_block_max_depth, 1);
1645 for(plane_index=0; plane_index<2; plane_index++){
1646 Plane *p= &s->plane[plane_index];
1653 s->last_spatial_decomposition_type = s->spatial_decomposition_type;
1654 s->last_qlog = s->qlog;
1655 s->last_qbias = s->qbias;
1656 s->last_mv_scale = s->mv_scale;
1657 s->last_block_max_depth = s->block_max_depth;
1658 s->last_spatial_decomposition_count = s->spatial_decomposition_count;
1672 uint32_t coef_sum= 0;
1673 int level, orientation, delta_qlog;
1676 for(orientation= level ? 1 : 0; orientation<4; orientation++){
1679 const int w= b->width;
1680 const int h= b->height;
1684 const int qdiv= (1<<16)/qmul;
1694 coef_sum+= abs(buf[x+y*stride]) * qdiv >> 16;
1700 coef_sum = (uint64_t)coef_sum * coef_sum >> 16;
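/*
 * Sketch of the fixed-point division used above: qdiv = (1<<16)/qmul is a
 * 16-bit reciprocal, so (abs(coef) * qdiv) >> 16 approximates
 * abs(coef) / qmul with one multiply per coefficient instead of a
 * division.  The demo below compares the two for a few values; it is
 * purely illustrative and not part of the encoder.
 */
#include <stdio.h>
#include <stdlib.h>

static void reciprocal_divide_demo(int qmul)
{
    const int qdiv = (1 << 16) / qmul;
    for (int coef = -1000; coef <= 1000; coef += 250)
        printf("coef=%5d  exact=%4d  approx=%4d\n",
               coef, abs(coef) / qmul, (abs(coef) * qdiv) >> 16);
}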
1715 s->qlog+= delta_qlog;
1722 int level, orientation, x, y;
1726 for(orientation= level ? 1 : 0; orientation<4; orientation++){
1730 memset(s->spatial_idwt_buffer, 0, sizeof(*s->spatial_idwt_buffer)*width*height);
1731 ibuf[b->width/2 + b->height/2*b->stride]= 256*16;
1734 for(x=0; x<width; x++){
1735 int64_t d= s->spatial_idwt_buffer[x + y*width]*16;
1739 if (orientation == 2)
1742 if (orientation != 1)
1750 const AVFrame *pict, int *got_packet)
1758 const int width= s->avctx->width;
1759 const int height= s->avctx->height;
1760 int level, orientation, plane_index, i, y, ret;
1761 uint8_t rc_header_bak[sizeof(s->header_state)];
1762 uint8_t rc_block_bak[sizeof(s->block_state)];
1770 for(i=0; i < s->nb_planes; i++){
1771 int hshift= i ? s->chroma_h_shift : 0;
1772 int vshift= i ? s->chroma_v_shift : 0;
1774 memcpy(&s->input_picture->data[i][y * s->input_picture->linesize[i]],
1784 pic = s->input_picture;
1813 if (s->current_picture->data[0]) {
1814 int w = s->avctx->width;
1815 int h = s->avctx->height;
1818 s->current_picture->linesize[0], w, h,
1820 if (s->current_picture->data[2]) {
1822 s->current_picture->linesize[1], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
1825 s->current_picture->linesize[2], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
1840 int block_width = (width +15)>>4;
1841 int block_height= (height+15)>>4;
1842 int stride= s->current_picture->linesize[0];
1849 mpv->new_picture = s->input_picture;
1852 mpv->uvlinesize = s->current_picture->linesize[1];
1874 mpv->hdsp = s->hdsp;
1876 s->hdsp = mpv->hdsp;
1881 memcpy(rc_header_bak, s->header_state, sizeof(s->header_state));
1882 memcpy(rc_block_bak, s->block_state, sizeof(s->block_state));
1887 s->spatial_decomposition_count= 5;
1889 while( !(width >>(s->chroma_h_shift + s->spatial_decomposition_count))
1890 || !(height>>(s->chroma_v_shift + s->spatial_decomposition_count)))
1891 s->spatial_decomposition_count--;
1893 if (s->spatial_decomposition_count <= 0) {
1903 if(s->last_spatial_decomposition_count != s->spatial_decomposition_count){
1904 for(plane_index=0; plane_index < s->nb_planes; plane_index++){
1910 mpv->misc_bits = 8 * (s->c.bytestream - s->c.bytestream_start);
1914 for(plane_index=0; plane_index < s->nb_planes; plane_index++){
1915 Plane *p= &s->plane[plane_index];
1923 if(pict->data[plane_index])
1957 ff_spatial_dwt(s->spatial_dwt_buffer, s->temp_dwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
1959 if (enc->pass1_rc && plane_index==0) {
1961 if (delta_qlog <= INT_MIN)
1966 memcpy(s->header_state, rc_header_bak, sizeof(s->header_state));
1967 memcpy(s->block_state, rc_block_bak, sizeof(s->block_state));
1974 for(orientation= level ? 1 : 0; orientation<4; orientation++){
1989 for(orientation= level ? 1 : 0; orientation<4; orientation++){
1996 ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
2000 s->spatial_idwt_buffer[y*w + x] *= 1 << FRAC_BITS;
2010 s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x]=
2011 pict->data[plane_index][y*pict->linesize[plane_index] + x];
2015 memset(s->spatial_idwt_buffer, 0, sizeof(IDWTELEM)*w*h);
2022 if(pict->data[plane_index])
2025 int d= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x] - pict->data[plane_index][y*pict->linesize[plane_index] + x];
2029 s->avctx->error[plane_index] += error;
2040 s->current_picture->pict_type = pic->pict_type;
2041 s->current_picture->quality = pic->quality;
2042 mpv->frame_bits = 8 * (s->c.bytestream - s->c.bytestream_start);
2044 mpv->total_bits += 8*(s->c.bytestream - s->c.bytestream_start);
2060 s->current_picture->pict_type);
2097 #define OFFSET(x) offsetof(SnowEncContext, x)
2098 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2105 { "memc_only", "Only do ME/MC (I frames -> ref, P frame -> ME+MC).", OFFSET(memc_only), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2106 { "no_bitstream", "Skip final bitstream writeout.", OFFSET(no_bitstream), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2107 { "intra_penalty", "Penalty for intra blocks in block decision", OFFSET(intra_penalty), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
2108 { "iterative_dia_size", "Dia size for the iterative ME", OFFSET(iterative_dia_size), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
2109 { "sc_threshold", "Scene change threshold", OFFSET(scenechange_threshold), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, VE },
2113 { "rc_eq", "Set rate control equation. When computing the expression, besides the standard functions "
2114 "defined in the section 'Expression Evaluation', the following functions are available: "
2115 "bits2qp(bits), qp2bits(qp). Also the following constants are available: iTex pTex tex mv "
2116 "fCode iCount mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex avgTex.",