64 #define ME_CACHE_SIZE 1024
72 uint8_t *const ref[3], uint8_t *const ref2[3],
73 int x, int y, int ref_index)
78 ((y*c->uvstride + x) >> s->chroma_h_shift),
79 ((y*c->uvstride + x) >> s->chroma_h_shift),
81 for (int i = 0; i < 3; i++) {
93 const int el = FFMIN(e, 10);
98 for (i = 0; i < el; i++)
104 for (i = e - 1; i >= el; i--)
131 for (int i = log2 - 1; i >= 0; i--)
162 int plane_index, ret;
172 s->spatial_decomposition_type = enc->pred;
177 for(plane_index=0; plane_index<3; plane_index++){
178 s->plane[plane_index].diag_mc= 1;
179 s->plane[plane_index].htaps= 6;
180 s->plane[plane_index].hcoeff[0]= 40;
181 s->plane[plane_index].hcoeff[1]= -10;
182 s->plane[plane_index].hcoeff[2]= 2;
183 s->plane[plane_index].fast_mc= 1;
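/* Default per-plane half-pel interpolation filter: htaps=6 with hcoeff={40,-10,2}
 * stores the three distinct taps of the symmetric 6-tap filter {2,-10,40,40,-10,2}
 * (each half sums to 32), the example recommended in the Snow spec; fast_mc flags
 * that the optimized MC path for these default coefficients may be used. */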
193 enc->qdsp.put_qpel_pixels_tab [0][dy+dx/4]=\
194 enc->qdsp.put_no_rnd_qpel_pixels_tab[0][dy+dx/4]=\
195 s->h264qpel.put_h264_qpel_pixels_tab[0][dy+dx/4];\
196 enc->qdsp.put_qpel_pixels_tab [1][dy+dx/4]=\
197 enc->qdsp.put_no_rnd_qpel_pixels_tab[1][dy+dx/4]=\
198 s->h264qpel.put_h264_qpel_pixels_tab[1][dy+dx/4];
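/* The generic mpegvideo motion search expects quarter-pel functions in the qdsp
 * tables, so they are filled with Snow's H.264-style qpel routines here; during the
 * search these stand in for Snow's own OBMC interpolation. */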
263 s->colorspace_type= 0;
267 s->colorspace_type = 1;
285 if (!s->input_picture)
292 int size= s->b_width * s->b_height << 2*s->block_max_depth;
293 for(i=0; i<s->max_ref_frames; i++){
296 if (!s->ref_mvs[i] || !s->ref_scores[i])
305 static int pix_sum(const uint8_t * pix, int line_size, int w, int h)
310 for (i = 0; i < h; i++) {
311 for (j = 0; j < w; j++) {
315 pix += line_size - w;
321 static int pix_norm1(const uint8_t * pix, int line_size, int w)
327 for (i = 0; i < w; i++) {
328 for (j = 0; j < w; j++) {
332 pix += line_size - w;
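/* pix_sum() returns the sum of the pixel values of a w x h block and pix_norm1()
 * the sum of their squares; encode_q_branch() uses them below to derive a block's
 * DC level and its intra coding score. */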
364 #define P_TOPRIGHT P[3]
365 #define P_MEDIAN P[4]
367 #define FLAG_QPEL 1 //must be 1
373 uint8_t p_buffer[1024];
374 uint8_t i_buffer[1024];
375 uint8_t p_state[sizeof(s->block_state)];
376 uint8_t i_state[sizeof(s->block_state)];
378 uint8_t *pbbak= s->c.bytestream;
379 uint8_t *pbbak_start= s->c.bytestream_start;
380 int score, score2, iscore, i_len, p_len, block_s, sum, base_bits;
381 const int w= s->b_width << s->block_max_depth;
382 const int h= s->b_height << s->block_max_depth;
383 const int rem_depth= s->block_max_depth - level;
384 const int index= (x + y*w) << rem_depth;
386 int trx= (x+1)<<rem_depth;
387 int try= (y+1)<<rem_depth;
394 int pl = left->color[0];
395 int pcb= left->color[1];
396 int pcr= left->color[2];
400 const int stride= s->current_picture->linesize[0];
401 const int uvstride= s->current_picture->linesize[1];
402 const uint8_t *const current_data[3] = { s->input_picture->data[0] + (x + y*stride)*block_w,
403 s->input_picture->data[1] + ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift),
404 s->input_picture->data[2] + ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift)};
406 int16_t last_mv[3][2];
408 const int shift= 1+qpel;
413 int ref, best_ref, ref_score, ref_mx, ref_my;
418 set_blocks(s, level, x, y, pl, pcb, pcr, 0, 0, 0, BLOCK_INTRA);
431 last_mv[0][0]= s->block[index].mx;
432 last_mv[0][1]= s->block[index].my;
433 last_mv[1][0]= right->mx;
434 last_mv[1][1]= right->my;
435 last_mv[2][0]= bottom->mx;
436 last_mv[2][1]= bottom->my;
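/* The vector already stored for this block and those of the right and bottom
 * neighbours are collected as extra predictor candidates for the EPZS motion
 * search (ff_epzs_motion_search) further down. */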
451 c->xmin = - x*block_w - 16+3;
452 c->ymin = - y*block_w - 16+3;
453 c->xmax = - (x+1)*block_w + (w<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3;
454 c->ymax = - (y+1)*block_w + (h<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3;
483 init_ref(c, current_data, s->last_picture[ref]->data, NULL, block_w*x, block_w*y, 0);
493 ref_score= c->sub_motion_search(&enc->m, &ref_mx, &ref_my, ref_score, 0, 0, level-LOG2_MB_SIZE+4, block_w);
501 if(score > ref_score){
511 base_bits= get_rac_count(&s->c) - 8*(s->c.bytestream - s->c.bytestream_start);
514 pc.bytestream= p_buffer;
515 memcpy(p_state, s->block_state, sizeof(s->block_state));
517 if(level!=s->block_max_depth)
518 put_rac(&pc, &p_state[4 + s_context], 1);
520 if(s->ref_frames > 1)
521 put_symbol(&pc, &p_state[128 + 1024 + 32*ref_context], best_ref, 0);
523 put_symbol(&pc, &p_state[128 + 32*(mx_context + 16*!!best_ref)], mx - pmx, 1);
524 put_symbol(&pc, &p_state[128 + 32*(my_context + 16*!!best_ref)], my - pmy, 1);
525 p_len= pc.bytestream - pc.bytestream_start;
528 block_s= block_w*block_w;
530 l= (sum + block_s/2)/block_s;
531 iscore = pix_norm1(current_data[0], stride, block_w) - 2*l*sum + l*l*block_s;
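/* With l the rounded block mean, sum the pixel sum and pix_norm1() the sum of
 * squared pixels, this expands to sum((pix - l)^2): the squared error of coding
 * the block as a flat DC-only intra block. */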
533 if (s->nb_planes > 2) {
534 block_s= block_w*block_w>>(s->chroma_h_shift + s->chroma_v_shift);
535 sum = pix_sum(current_data[1], uvstride, block_w>>s->chroma_h_shift, block_w>>s->chroma_v_shift);
536 cb= (sum + block_s/2)/block_s;
538 sum = pix_sum(current_data[2], uvstride, block_w>>s->chroma_h_shift, block_w>>s->chroma_v_shift);
539 cr= (sum + block_s/2)/block_s;
546 ic.bytestream= i_buffer;
547 memcpy(i_state, s->block_state, sizeof(s->block_state));
548 if(level!=s->block_max_depth)
549 put_rac(&ic, &i_state[4 + s_context], 1);
552 if (s->nb_planes > 2) {
556 i_len= ic.bytestream - ic.bytestream_start;
559 av_assert1(iscore < 255*255*256 + enc->lambda2*10);
565 int varc= iscore >> 8;
566 int vard= score >> 8;
567 if (vard <= 64 || vard < varc)
570 c->scene_change_score += enc->m.qscale;
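/* Same heuristic as the mpegvideo encoders: when the motion-compensated error (vard)
 * is neither small nor lower than the intra error (varc), the block adds to the
 * scene-change score, which is later compared against the sc_threshold option to
 * decide whether to force a keyframe. */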
573 if(level!=s->block_max_depth){
574 put_rac(&s->c, &s->block_state[4 + s_context], 0);
581 if(score2 < score && score2 < iscore)
587 memcpy(pbbak, i_buffer, i_len);
589 s->c.bytestream_start= pbbak_start;
590 s->c.bytestream= pbbak + i_len;
591 set_blocks(s, level, x, y, l, cb, cr, pmx, pmy, 0, BLOCK_INTRA);
592 memcpy(s->block_state, i_state, sizeof(s->block_state));
595 memcpy(pbbak, p_buffer, p_len);
597 s->c.bytestream_start= pbbak_start;
598 s->c.bytestream= pbbak + p_len;
599 set_blocks(s, level, x, y, pl, pcb, pcr, mx, my, best_ref, 0);
600 memcpy(s->block_state, p_state, sizeof(s->block_state));
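/* Both candidates were coded into scratch range coders (ic/pc) above; whichever of
 * the intra (i_buffer/i_state) and inter (p_buffer/p_state) encodings yields the
 * lower rate-distortion score is copied back into the real bytestream and the
 * block state. */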
606 const int w= s->b_width << s->block_max_depth;
607 const int rem_depth= s->block_max_depth - level;
608 const int index= (x + y*w) << rem_depth;
609 int trx= (x+1)<<rem_depth;
615 int pl = left->color[0];
616 int pcb= left->color[1];
617 int pcr= left->color[2];
625 set_blocks(s, level, x, y, pl, pcb, pcr, 0, 0, 0, BLOCK_INTRA);
629 if(level!=s->block_max_depth){
631 put_rac(&s->c, &s->block_state[4 + s_context], 1);
633 put_rac(&s->c, &s->block_state[4 + s_context], 0);
645 if (s->nb_planes > 2) {
649 set_blocks(s, level, x, y, b->color[0], b->color[1], b->color[2], pmx, pmy, 0, BLOCK_INTRA);
653 if(s->ref_frames > 1)
654 put_symbol(&s->c, &s->block_state[128 + 1024 + 32*ref_context], b->ref, 0);
655 put_symbol(&s->c, &s->block_state[128 + 32*mx_context], b->mx - pmx, 1);
656 put_symbol(&s->c, &s->block_state[128 + 32*my_context], b->my - pmy, 1);
657 set_blocks(s, level, x, y, pl, pcb, pcr, b->mx, b->my, b->ref, 0);
665 Plane *p= &s->plane[plane_index];
666 const int block_size = MB_SIZE >> s->block_max_depth;
667 const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
668 const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
670 const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
671 const int ref_stride= s->current_picture->linesize[plane_index];
672 const uint8_t *src = s->input_picture->data[plane_index];
674 const int b_stride = s->b_width << s->block_max_depth;
677 int index= mb_x + mb_y*b_stride;
686 b->color[plane_index]= 0;
687 memset(dst, 0, obmc_stride*obmc_stride*sizeof(IDWTELEM));
690 int mb_x2= mb_x + (i&1) - 1;
691 int mb_y2= mb_y + (i>>1) - 1;
692 int x= block_w*mb_x2 + block_w/2;
693 int y= block_h*mb_y2 + block_h/2;
696 x, y, block_w, block_h, w, h, obmc_stride, ref_stride, obmc_stride, mb_x2, mb_y2, 0, 0, plane_index);
698 for(y2= FFMAX(y, 0); y2<FFMIN(h, y+block_h); y2++){
699 for(x2= FFMAX(x, 0); x2<FFMIN(w, x+block_w); x2++){
700 int index= x2-(block_w*mb_x - block_w/2) + (y2-(block_h*mb_y - block_h/2))*obmc_stride;
701 int obmc_v= obmc[index];
703 if(y<0) obmc_v += obmc[index + block_h*obmc_stride];
704 if(x<0) obmc_v += obmc[index + block_w];
705 if(y+block_h>h) obmc_v += obmc[index - block_h*obmc_stride];
706 if(x+block_w>w) obmc_v += obmc[index - block_w];
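/* Near the frame borders the 2x-sized OBMC window has no outside neighbour, so the
 * mirrored weights are folded back in; this keeps the per-pixel weights summing to
 * the full OBMC weight before the DC estimate is formed. */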
712 aa += obmc_v * obmc_v;
722 const int b_stride = s->b_width << s->block_max_depth;
723 const int b_height = s->b_height<< s->block_max_depth;
724 int index= x + y*b_stride;
734 if(x<0 || x>=b_stride || y>=b_height)
760 int plane_index, uint8_t (*obmc_edged)[MB_SIZE * 2])
763 Plane *p= &s->plane[plane_index];
764 const int block_size = MB_SIZE >> s->block_max_depth;
765 const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
766 const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
767 const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
768 const int ref_stride= s->current_picture->linesize[plane_index];
769 uint8_t *dst= s->current_picture->data[plane_index];
770 const uint8_t *src = s->input_picture->data[plane_index];
772 uint8_t *cur = s->scratchbuf;
773 uint8_t *tmp = s->emu_edge_buffer;
774 const int b_stride = s->b_width << s->block_max_depth;
775 const int b_height = s->b_height<< s->block_max_depth;
781 int sx= block_w*mb_x - block_w/2;
782 int sy= block_h*mb_y - block_h/2;
783 int x0= FFMAX(0,-sx);
784 int y0= FFMAX(0,-sy);
785 int x1= FFMIN(block_w*2, w-sx);
786 int y1= FFMIN(block_h*2, h-sy);
791 ff_snow_pred_block(s, cur, tmp, ref_stride, sx, sy, block_w*2, block_h*2, &s->block[mb_x + mb_y*b_stride], plane_index, w, h);
793 for(y=y0; y<y1; y++){
794 const uint8_t *obmc1= obmc_edged[y];
796 uint8_t *cur1 = cur + y*ref_stride;
797 uint8_t *dst1 = dst + sx + (sy+y)*ref_stride;
798 for(x=x0; x<x1; x++){
799 #if FRAC_BITS >= LOG2_OBMC_MAX
805 if(v&(~255)) v= ~(v>>31);
812 && (mb_x == 0 || mb_x == b_stride-1)
813 && (mb_y == 0 || mb_y == b_height-1)){
823 memcpy(dst + sx+x0 + (sy+y)*ref_stride, cur + x0 + y*ref_stride, x1-x0);
834 distortion = ff_w97_32_c(&enc->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
836 distortion = ff_w53_32_c(&enc->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
840 int off = sx+16*(i&1) + (sy+16*(i>>1))*ref_stride;
841 distortion += enc->mecc.me_cmp[0](&enc->m, src + off, dst + off, ref_stride, 16);
846 distortion = enc->mecc.me_cmp[0](&enc->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, block_w*2);
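/* For the wavelet-based comparison functions the reconstruction error of the whole
 * 32-wide area is measured in the transform domain (ff_w97_32_c for the 9/7 wavelet,
 * ff_w53_32_c for 5/3); otherwise the configured me_cmp function is applied per
 * 16x16 block or over the full double-width block. */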
857 if(mb_x == b_stride-2)
860 return distortion + rate*penalty_factor;
867 Plane *p= &s->plane[plane_index];
868 const int block_size = MB_SIZE >> s->block_max_depth;
869 const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
870 const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
872 const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
873 const int ref_stride= s->current_picture->linesize[plane_index];
874 uint8_t *dst= s->current_picture->data[plane_index];
875 const uint8_t *src = s->input_picture->data[plane_index];
879 const int b_stride = s->b_width << s->block_max_depth;
889 int mb_x2= mb_x + (i%3) - 1;
890 int mb_y2= mb_y + (i/3) - 1;
891 int x= block_w*mb_x2 + block_w/2;
892 int y= block_h*mb_y2 + block_h/2;
895 x, y, block_w, block_h, w, h, 0, ref_stride, obmc_stride, mb_x2, mb_y2, 1, 1, plane_index);
898 for(y2= y; y2<0; y2++)
899 memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, block_w);
900 for(y2= h; y2<y+block_h; y2++)
901 memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, block_w);
903 for(y2= y; y2<y+block_h; y2++)
904 memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, -x);
907 for(y2= y; y2<y+block_h; y2++)
908 memcpy(dst + w + y2*ref_stride, src + w + y2*ref_stride, x+block_w - w);
912 distortion += enc->mecc.me_cmp[block_w==8](&enc->m, src + x + y*ref_stride, dst + x + y*ref_stride, ref_stride, block_h);
926 for(i=merged?4:0; i<9; i++){
927 static const int dxy[9][2] = {{0,0},{1,0},{0,1},{1,1},{2,0},{2,1},{-1,2},{0,2},{1,2}};
931 return distortion + rate*penalty_factor;
935 const int w= b->width;
936 const int h= b->height;
941 int *runs = s->run_buffer;
948 int l=0, lt=0, t=0, rt=0;
970 if(px<b->parent->width && py<b->parent->height)
971 p= parent[px + py*2*stride];
975 runs[run_index++]= run;
983 max_index= run_index;
984 runs[run_index++]= run;
986 run= runs[run_index++];
989 if(run_index <= max_index)
993 if(s->c.bytestream_end - s->c.bytestream < w*40){
999 int l=0, lt=0, t=0, rt=0;
1021 if(px<b->parent->width && py<b->parent->height)
1022 p= parent[px + py*2*stride];
1030 run= runs[run_index++];
1032 if(run_index <= max_index)
1042 int l2= 2*FFABS(l) + (l<0);
1062 uint8_t (*obmc_edged)[MB_SIZE * 2], int *best_rd)
1065 const int b_stride= s->b_width << s->block_max_depth;
1073 block->color[0] = p[0];
1074 block->color[1] = p[1];
1075 block->color[2] = p[2];
1093 int mb_x, int mb_y, int p0, int p1,
1094 uint8_t (*obmc_edged)[MB_SIZE * 2], int *best_rd)
1097 const int b_stride = s->b_width << s->block_max_depth;
1119 if (rd < *best_rd) {
1129 int p0, int p1, int ref, int *best_rd)
1132 const int b_stride= s->b_width << s->block_max_depth;
1141 backup[0] = block[0];
1142 backup[1] = block[1];
1143 backup[2] = block[b_stride];
1144 backup[3] = block[b_stride + 1];
1169 block[0]= backup[0];
1170 block[1]= backup[1];
1171 block[b_stride]= backup[2];
1172 block[b_stride+1]= backup[3];
1180 int pass, mb_x, mb_y;
1181 const int b_width = s->b_width << s->block_max_depth;
1182 const int b_height= s->b_height << s->block_max_depth;
1183 const int b_stride= b_width;
1188 uint8_t state[sizeof(s->block_state)];
1189 memcpy(state, s->block_state, sizeof(s->block_state));
1190 for(mb_y= 0; mb_y<s->b_height; mb_y++)
1191 for(mb_x= 0; mb_x<s->b_width; mb_x++)
1194 memcpy(s->block_state, state, sizeof(s->block_state));
1197 for(pass=0; pass<25; pass++){
1200 for(mb_y= 0; mb_y<b_height; mb_y++){
1201 for(mb_x= 0; mb_x<b_width; mb_x++){
1202 int dia_change, i, j, ref;
1203 int best_rd= INT_MAX, ref_rd;
1205 const int index= mb_x + mb_y * b_stride;
1215 const int b_w= (MB_SIZE >> s->block_max_depth);
1231 for (y = 0; y < b_w * 2; y++)
1232 memcpy(obmc_edged[y], ff_obmc_tab[s->block_max_depth] + y * b_w * 2, b_w * 2);
1234 for(y=0; y<b_w*2; y++)
1235 memset(obmc_edged[y], obmc_edged[y][0] + obmc_edged[y][b_w-1], b_w);
1236 if(mb_x==b_stride-1)
1237 for(y=0; y<b_w*2; y++)
1238 memset(obmc_edged[y]+b_w, obmc_edged[y][b_w] + obmc_edged[y][b_w*2-1], b_w);
1240 for(x=0; x<b_w*2; x++)
1241 obmc_edged[0][x] += obmc_edged[b_w-1][x];
1242 for(y=1; y<b_w; y++)
1243 memcpy(obmc_edged[y], obmc_edged[0], b_w*2);
1245 if(mb_y==b_height-1){
1246 for(x=0; x<b_w*2; x++)
1247 obmc_edged[b_w*2-1][x] += obmc_edged[b_w][x];
1248 for(y=b_w; y<b_w*2-1; y++)
1249 memcpy(obmc_edged[y], obmc_edged[b_w*2-1], b_w*2);
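/* At the picture borders the part of the OBMC window that would fall outside is
 * folded back onto the edge rows/columns, so border blocks still apply the full
 * window weight to the pixels that do exist. */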
1254 if(mb_x==0 || mb_y==0 || mb_x==b_width-1 || mb_y==b_height-1){
1255 const uint8_t *src = s->input_picture->data[0];
1256 uint8_t *dst= s->current_picture->data[0];
1257 const int stride= s->current_picture->linesize[0];
1258 const int block_w= MB_SIZE >> s->block_max_depth;
1259 const int block_h= MB_SIZE >> s->block_max_depth;
1260 const int sx= block_w*mb_x - block_w/2;
1261 const int sy= block_h*mb_y - block_h/2;
1262 const int w= s->plane[0].width;
1263 const int h= s->plane[0].height;
1268 for(y= h; y<sy+block_h*2; y++)
1271 for(y=sy; y<sy+block_h*2; y++)
1274 if(sx+block_w*2 > w){
1275 for(y=sy; y<sy+block_h*2; y++)
1281 for(i=0; i < s->nb_planes; i++)
1294 int16_t (*mvr)[2]= &s->ref_mvs[ref][index];
1303 check_block_inter(enc, mb_x, mb_y, mvr[-b_stride][0], mvr[-b_stride][1], obmc_edged, &best_rd);
1305 check_block_inter(enc, mb_x, mb_y, mvr[-1][0], mvr[-1][1], obmc_edged, &best_rd);
1309 check_block_inter(enc, mb_x, mb_y, mvr[b_stride][0], mvr[b_stride][1], obmc_edged, &best_rd);
1314 int newx = block->mx;
1315 int newy = block->my;
1318 for(i=0; i < dia_size; i++){
1320 dia_change |= check_block_inter(enc, mb_x, mb_y, newx+4*(i-j), newy+(4*j), obmc_edged, &best_rd);
1321 dia_change |= check_block_inter(enc, mb_x, mb_y, newx-4*(i-j), newy-(4*j), obmc_edged, &best_rd);
1322 dia_change |= check_block_inter(enc, mb_x, mb_y, newx-(4*j), newy+4*(i-j), obmc_edged, &best_rd);
1323 dia_change |= check_block_inter(enc, mb_x, mb_y, newx+(4*j), newy-4*(i-j), obmc_edged, &best_rd);
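/* Iterative diamond refinement: candidates on a diamond of radius 4*i (quarter-pel
 * units) around the current best vector are tried in all four quadrants; dia_change
 * records whether any candidate lowered best_rd, and the pattern appears to widen up
 * to dia_size (the iterative_dia_size option when set). */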
1329 static const int square[8][2]= {{+1, 0},{-1, 0},{ 0,+1},{ 0,-1},{+1,+1},{-1,-1},{+1,-1},{-1,+1},};
1336 mvr[0][0]= block->mx;
1337 mvr[0][1]= block->my;
1338 if(ref_rd > best_rd){
1365 if(s->block_max_depth == 1){
1367 for(mb_y= 0; mb_y<b_height; mb_y+=2){
1368 for(mb_x= 0; mb_x<b_width; mb_x+=2){
1370 int best_rd, init_rd;
1371 const int index= mb_x + mb_y * b_stride;
1376 b[2]= b[0]+b_stride;
1391 (b[0]->mx + b[1]->mx + b[2]->mx + b[3]->mx + 2) >> 2,
1392 (b[0]->my + b[1]->my + b[2]->my + b[3]->my + 2) >> 2, 0, &best_rd);
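/* When the block tree is one level deep, each 2x2 group of small blocks is also
 * tried as a single merged block using the rounded average of the four motion
 * vectors (check_4block_inter); the merge is kept only if it lowers the
 * rate-distortion cost (init_rd != best_rd below). */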
1398 if(init_rd != best_rd)
1431 const int w= b->width;
1432 const int h= b->height;
1435 int x,y, thres1, thres2;
1453 if((unsigned)(i+thres1) > thres2){
1473 if((unsigned)(i+thres1) > thres2){
1492 const int w= b->width;
1493 const int h= b->height;
1514 const int w= b->width;
1515 const int h= b->height;
1518 for(y= h-1; y>=0; y--){
1519 for(x= w-1; x>=0; x--){
1538 const int w= b->width;
1539 const int h= b->height;
1562 int plane_index, level, orientation;
1564 for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
1566 for(orientation= level ? 1:0; orientation<4; orientation++){
1567 if(orientation==2) continue;
1568 put_symbol(&s->c, s->header_state, s->plane[plane_index].band[level][orientation].qlog, 1);
1578 memset(kstate, MID_STATE, sizeof(kstate));
1581 if(s->keyframe || s->always_reset){
1583 s->last_spatial_decomposition_type=
1587 s->last_block_max_depth= 0;
1588 for(plane_index=0; plane_index<2; plane_index++){
1589 Plane *p= &s->plane[plane_index];
1597 put_rac(&s->c, s->header_state, s->always_reset);
1598 put_symbol(&s->c, s->header_state, s->temporal_decomposition_type, 0);
1599 put_symbol(&s->c, s->header_state, s->temporal_decomposition_count, 0);
1600 put_symbol(&s->c, s->header_state, s->spatial_decomposition_count, 0);
1602 if (s->nb_planes > 2) {
1606 put_rac(&s->c, s->header_state, s->spatial_scalability);
1615 for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
1616 Plane *p= &s->plane[plane_index];
1621 put_rac(&s->c, s->header_state, update_mc);
1623 for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
1624 Plane *p= &s->plane[plane_index];
1631 if(s->last_spatial_decomposition_count != s->spatial_decomposition_count){
1633 put_symbol(&s->c, s->header_state, s->spatial_decomposition_count, 0);
1639 put_symbol(&s->c, s->header_state, s->spatial_decomposition_type - s->last_spatial_decomposition_type, 1);
1641 put_symbol(&s->c, s->header_state, s->mv_scale - s->last_mv_scale, 1);
1642 put_symbol(&s->c, s->header_state, s->qbias - s->last_qbias, 1);
1643 put_symbol(&s->c, s->header_state, s->block_max_depth - s->last_block_max_depth, 1);
1651 for(plane_index=0; plane_index<2; plane_index++){
1652 Plane *p= &s->plane[plane_index];
1659 s->last_spatial_decomposition_type = s->spatial_decomposition_type;
1660 s->last_qlog = s->qlog;
1661 s->last_qbias = s->qbias;
1662 s->last_mv_scale = s->mv_scale;
1663 s->last_block_max_depth = s->block_max_depth;
1664 s->last_spatial_decomposition_count = s->spatial_decomposition_count;
1678 uint32_t coef_sum= 0;
1679 int level, orientation, delta_qlog;
1682 for(orientation= level ? 1 : 0; orientation<4; orientation++){
1685 const int w= b->width;
1686 const int h= b->height;
1690 const int qdiv= (1<<16)/qmul;
1700 coef_sum+= abs(buf[x+y*stride]) * qdiv >> 16;
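/* qdiv = (1<<16)/qmul turns the per-coefficient division into a multiply and shift,
 * so coef_sum approximates the sum of quantized coefficient magnitudes; the one-pass
 * rate control below uses it to choose delta_qlog. */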
1706 coef_sum = (uint64_t)coef_sum * coef_sum >> 16;
1721 s->qlog+= delta_qlog;
1728 int level, orientation, x, y;
1732 for(orientation= level ? 1 : 0; orientation<4; orientation++){
1736 memset(s->spatial_idwt_buffer, 0, sizeof(*s->spatial_idwt_buffer)*width*height);
1737 ibuf[b->width/2 + b->height/2*b->stride]= 256*16;
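/* calculate_visual_weight() appears to clear the spatial buffer, place a single
 * impulse at the centre of the band and run the inverse DWT; the spread of that
 * impulse gives the band's basis-function energy, used to weight its quantizer. */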
1740 for(x=0; x<width; x++){
1745 if (orientation == 2)
1748 if (orientation != 1)
1756 const AVFrame *pict, int *got_packet)
1764 const int width= s->avctx->width;
1765 const int height= s->avctx->height;
1766 int level, orientation, plane_index, i, y, ret;
1767 uint8_t rc_header_bak[sizeof(s->header_state)];
1768 uint8_t rc_block_bak[sizeof(s->block_state)];
1776 for(i=0; i < s->nb_planes; i++){
1777 int hshift= i ? s->chroma_h_shift : 0;
1778 int vshift= i ? s->chroma_v_shift : 0;
1780 memcpy(&s->input_picture->data[i][y * s->input_picture->linesize[i]],
1790 pic = s->input_picture;
1819 if (s->current_picture->data[0]) {
1820 int w = s->avctx->width;
1821 int h = s->avctx->height;
1824 s->current_picture->linesize[0], w, h,
1826 if (s->current_picture->data[2]) {
1828 s->current_picture->linesize[1], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
1831 s->current_picture->linesize[2], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
1846 int block_width = (width +15)>>4;
1847 int block_height= (height+15)>>4;
1848 int stride= s->current_picture->linesize[0];
1855 mpv->new_picture = s->input_picture;
1858 mpv->uvlinesize = s->current_picture->linesize[1];
1880 mpv->hdsp = s->hdsp;
1882 s->hdsp = mpv->hdsp;
1887 memcpy(rc_header_bak, s->header_state, sizeof(s->header_state));
1888 memcpy(rc_block_bak, s->block_state, sizeof(s->block_state));
1893 s->spatial_decomposition_count= 5;
1895 while( !(width >>(s->chroma_h_shift + s->spatial_decomposition_count))
1896 || !(height>>(s->chroma_v_shift + s->spatial_decomposition_count)))
1897 s->spatial_decomposition_count--;
1899 if (s->spatial_decomposition_count <= 0) {
1909 if(s->last_spatial_decomposition_count != s->spatial_decomposition_count){
1910 for(plane_index=0; plane_index < s->nb_planes; plane_index++){
1916 mpv->misc_bits = 8 * (s->c.bytestream - s->c.bytestream_start);
1920 for(plane_index=0; plane_index < s->nb_planes; plane_index++){
1921 Plane *p= &s->plane[plane_index];
1929 if(pict->data[plane_index])
1963 ff_spatial_dwt(s->spatial_dwt_buffer, s->temp_dwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
1965 if (enc->pass1_rc && plane_index==0) {
1967 if (delta_qlog <= INT_MIN)
1972 memcpy(s->header_state, rc_header_bak, sizeof(s->header_state));
1973 memcpy(s->block_state, rc_block_bak, sizeof(s->block_state));
1980 for(orientation= level ? 1 : 0; orientation<4; orientation++){
1995 for(orientation= level ? 1 : 0; orientation<4; orientation++){
2002 ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
2006 s->spatial_idwt_buffer[y*w + x] *= 1 << FRAC_BITS;
2016 s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x]=
2017 pict->data[plane_index][y*pict->linesize[plane_index] + x];
2021 memset(s->spatial_idwt_buffer, 0, sizeof(IDWTELEM)*w*h);
2028 if(pict->data[plane_index])
2031 int d= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x] - pict->data[plane_index][y*pict->linesize[plane_index] + x];
2035 s->avctx->error[plane_index] += error;
2046 s->current_picture->pict_type = pic->pict_type;
2047 s->current_picture->quality = pic->quality;
2048 mpv->frame_bits = 8 * (s->c.bytestream - s->c.bytestream_start);
2050 mpv->total_bits += 8*(s->c.bytestream - s->c.bytestream_start);
2066 s->current_picture->pict_type);
2103 #define OFFSET(x) offsetof(SnowEncContext, x)
2104 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2111 { "memc_only", "Only do ME/MC (I frames -> ref, P frame -> ME+MC).", OFFSET(memc_only), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2112 { "no_bitstream", "Skip final bitstream writeout.", OFFSET(no_bitstream), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2113 { "intra_penalty", "Penalty for intra blocks in block decision", OFFSET(intra_penalty), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
2114 { "iterative_dia_size", "Dia size for the iterative ME", OFFSET(iterative_dia_size), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
2115 { "sc_threshold", "Scene change threshold", OFFSET(scenechange_threshold), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, VE },
2119 { "rc_eq", "Set rate control equation. When computing the expression, besides the standard functions "
2120 "defined in the section 'Expression Evaluation', the following functions are available: "
2121 "bits2qp(bits), qp2bits(qp). Also the following constants are available: iTex pTex tex mv "
2122 "fCode iCount mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex avgTex.",