Go to the documentation of this file.
41 #define DNX10BIT_QMAT_SHIFT 18
42 #define RC_VARIANCE 1 // use variance or ssd for fast rc
43 #define LAMBDA_FRAC_BITS 10
45 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
47 {
"nitris_compat",
"encode with Avid Nitris compatibility",
49 {
"ibias",
"intra quant bias",
51 { .i64 = 0 }, INT_MIN, INT_MAX,
VE },
56 0, 0,
VE,
"profile" },
58 0, 0,
VE,
"profile" },
60 0, 0,
VE,
"profile" },
62 0, 0,
VE,
"profile" },
64 0, 0,
VE,
"profile" },
66 0, 0,
VE,
"profile" },
82 for (
i = 0;
i < 4;
i++) {
105 memcpy(
block + 0 * 8, pixels + 0 * line_size, 8 *
sizeof(*
block));
106 memcpy(
block + 7 * 8, pixels + 0 * line_size, 8 *
sizeof(*
block));
107 memcpy(
block + 1 * 8, pixels + 1 * line_size, 8 *
sizeof(*
block));
108 memcpy(
block + 6 * 8, pixels + 1 * line_size, 8 *
sizeof(*
block));
109 memcpy(
block + 2 * 8, pixels + 2 * line_size, 8 *
sizeof(*
block));
110 memcpy(
block + 5 * 8, pixels + 2 * line_size, 8 *
sizeof(*
block));
111 memcpy(
block + 3 * 8, pixels + 3 * line_size, 8 *
sizeof(*
block));
112 memcpy(
block + 4 * 8, pixels + 3 * line_size, 8 *
sizeof(*
block));
118 int i, j,
level, last_non_zero, start_i;
123 unsigned int threshold1, threshold2;
130 qmat = n < 4 ?
ctx->q_intra_matrix[qscale] :
ctx->q_chroma_intra_matrix[qscale];
131 bias=
ctx->intra_quant_bias * (1 << (16 - 8));
132 threshold1 = (1 << 16) - bias - 1;
133 threshold2 = (threshold1 << 1);
135 for (
i = 63;
i >= start_i;
i--) {
139 if (((
unsigned)(
level + threshold1)) > threshold2) {
147 for (
i = start_i;
i <= last_non_zero;
i++) {
151 if (((
unsigned)(
level + threshold1)) > threshold2) {
171 return last_non_zero;
178 const int *qmat = n<4 ?
ctx->q_intra_matrix[qscale] :
ctx->q_chroma_intra_matrix[qscale];
179 int last_non_zero = 0;
187 for (
i = 1;
i < 64; ++
i) {
202 return last_non_zero;
208 int max_level = 1 << (
ctx->bit_depth + 2);
211 max_level, 4 *
sizeof(*
ctx->orig_vlc_codes),
fail);
213 max_level, 4 *
sizeof(*
ctx->orig_vlc_bits),
fail);
219 ctx->vlc_codes =
ctx->orig_vlc_codes + max_level * 2;
220 ctx->vlc_bits =
ctx->orig_vlc_bits + max_level * 2;
228 offset = (alevel - 1) >> 6;
231 for (j = 0; j < 257; j++) {
232 if (
ctx->cid_table->ac_info[2*j+0] >> 1 == alevel &&
234 (!
run || (
ctx->cid_table->ac_info[2*j+1] & 2) &&
run)) {
238 (
ctx->cid_table->ac_codes[j] << 1) | (sign & 1);
239 ctx->vlc_bits[
index] =
ctx->cid_table->ac_bits[j] + 1;
241 ctx->vlc_codes[
index] =
ctx->cid_table->ac_codes[j];
251 ctx->vlc_bits[
index] +=
ctx->cid_table->index_bits;
255 for (
i = 0;
i < 62;
i++) {
256 int run =
ctx->cid_table->run[
i];
258 ctx->run_codes[
run] =
ctx->cid_table->run_codes[
i];
259 ctx->run_bits[
run] =
ctx->cid_table->run_bits[
i];
269 uint16_t weight_matrix[64] = { 1, };
271 const uint8_t *luma_weight_table =
ctx->cid_table->luma_weight;
272 const uint8_t *chroma_weight_table =
ctx->cid_table->chroma_weight;
275 (
ctx->m.avctx->qmax + 1), 64 *
sizeof(
int),
fail);
277 (
ctx->m.avctx->qmax + 1), 64 *
sizeof(
int),
fail);
279 (
ctx->m.avctx->qmax + 1), 64 * 2 *
sizeof(uint16_t),
282 (
ctx->m.avctx->qmax + 1), 64 * 2 *
sizeof(uint16_t),
285 if (
ctx->bit_depth == 8) {
286 for (
i = 1;
i < 64;
i++) {
288 weight_matrix[j] =
ctx->cid_table->luma_weight[
i];
291 weight_matrix,
ctx->intra_quant_bias, 1,
292 ctx->m.avctx->qmax, 1);
293 for (
i = 1;
i < 64;
i++) {
295 weight_matrix[j] =
ctx->cid_table->chroma_weight[
i];
298 weight_matrix,
ctx->intra_quant_bias, 1,
299 ctx->m.avctx->qmax, 1);
301 for (qscale = 1; qscale <=
ctx->m.avctx->qmax; qscale++) {
302 for (
i = 0;
i < 64;
i++) {
303 ctx->qmatrix_l[qscale][
i] <<= 2;
304 ctx->qmatrix_c[qscale][
i] <<= 2;
305 ctx->qmatrix_l16[qscale][0][
i] <<= 2;
306 ctx->qmatrix_l16[qscale][1][
i] <<= 2;
307 ctx->qmatrix_c16[qscale][0][
i] <<= 2;
308 ctx->qmatrix_c16[qscale][1][
i] <<= 2;
313 for (qscale = 1; qscale <=
ctx->m.avctx->qmax; qscale++) {
314 for (
i = 1;
i < 64;
i++) {
329 (qscale * luma_weight_table[
i]);
331 (qscale * chroma_weight_table[
i]);
336 ctx->m.q_chroma_intra_matrix16 =
ctx->qmatrix_c16;
337 ctx->m.q_chroma_intra_matrix =
ctx->qmatrix_c;
338 ctx->m.q_intra_matrix16 =
ctx->qmatrix_l16;
339 ctx->m.q_intra_matrix =
ctx->qmatrix_l;
356 ctx->frame_bits = (
ctx->coding_unit_size -
357 ctx->data_offset - 4 -
ctx->min_padding) * 8;
381 "pixel format is incompatible with DNxHD\n");
390 "pixel format is incompatible with DNxHD profile\n");
396 "pixel format is incompatible with DNxHR HQX profile\n");
404 "pixel format is incompatible with DNxHR LB/SQ/HQ profile\n");
413 "video parameters incompatible with DNxHD. Valid DNxHD profiles:\n");
419 if (
ctx->cid >= 1270 &&
ctx->cid <= 1274)
424 "Input dimensions too small, input must be at least 256x120\n");
449 if (!
ctx->m.dct_quantize)
455 ctx->block_width_l2 = 4;
456 }
else if (
ctx->bit_depth == 10) {
459 ctx->block_width_l2 = 4;
462 ctx->block_width_l2 = 3;
473 ctx->m.mb_height /= 2;
478 "Interlaced encoding is not supported for DNxHR profiles.\n");
482 ctx->m.mb_num =
ctx->m.mb_height *
ctx->m.mb_width;
488 ctx->coding_unit_size =
ctx->frame_size;
490 ctx->frame_size =
ctx->cid_table->frame_size;
491 ctx->coding_unit_size =
ctx->cid_table->coding_unit_size;
494 if (
ctx->m.mb_height > 68)
495 ctx->data_offset = 0x170 + (
ctx->m.mb_height << 2);
497 ctx->data_offset = 0x280;
505 if (
ctx->nitris_compat)
506 ctx->min_padding = 1600;
514 ctx->m.mb_height *
sizeof(uint32_t),
fail);
516 ctx->m.mb_height *
sizeof(uint32_t),
fail);
518 ctx->m.mb_num *
sizeof(uint16_t),
fail);
522 #if FF_API_CODED_FRAME
560 memset(
buf, 0,
ctx->data_offset);
564 if (
ctx->cid >= 1270 &&
ctx->cid <= 1274)
569 buf[5] =
ctx->interlaced ?
ctx->cur_field + 2 : 0x01;
576 buf[0x21] =
ctx->bit_depth == 10 ? 0x58 : 0x38;
577 buf[0x22] = 0x88 + (
ctx->interlaced << 2);
602 (
ctx->cid_table->dc_codes[nbits] << nbits) +
603 av_mod_uintp2(
diff, nbits));
608 int last_index,
int n)
610 int last_non_zero = 0;
616 for (
i = 1;
i <= last_index;
i++) {
617 j =
ctx->m.intra_scantable.permutated[
i];
620 int run_level =
i - last_non_zero - 1;
621 int rlevel = slevel * (1 << 1) | !!run_level;
625 ctx->run_codes[run_level]);
634 int qscale,
int last_index)
641 weight_matrix = ((n % 6) < 2) ?
ctx->cid_table->luma_weight
642 :
ctx->cid_table->chroma_weight;
644 weight_matrix = (n & 2) ?
ctx->cid_table->chroma_weight
645 :
ctx->cid_table->luma_weight;
648 for (
i = 1;
i <= last_index;
i++) {
649 int j =
ctx->m.intra_scantable.permutated[
i];
653 level = (1 - 2 *
level) * qscale * weight_matrix[
i];
654 if (
ctx->bit_depth == 10) {
655 if (weight_matrix[
i] != 8)
659 if (weight_matrix[
i] != 32)
665 level = (2 *
level + 1) * qscale * weight_matrix[
i];
666 if (
ctx->bit_depth == 10) {
667 if (weight_matrix[
i] != 8)
671 if (weight_matrix[
i] != 32)
685 for (
i = 0;
i < 64;
i++)
693 int last_non_zero = 0;
696 for (
i = 1;
i <= last_index;
i++) {
697 j =
ctx->m.intra_scantable.permutated[
i];
700 int run_level =
i - last_non_zero - 1;
702 !!run_level] +
ctx->run_bits[run_level];
712 const int bs =
ctx->block_width_l2;
713 const int bw = 1 << bs;
714 int dct_y_offset =
ctx->dct_y_offset;
715 int dct_uv_offset =
ctx->dct_uv_offset;
716 int linesize =
ctx->m.linesize;
717 int uvlinesize =
ctx->m.uvlinesize;
718 const uint8_t *ptr_y =
ctx->thread[0]->src[0] +
719 ((mb_y << 4) *
ctx->m.linesize) + (mb_x << bs + 1);
720 const uint8_t *ptr_u =
ctx->thread[0]->src[1] +
721 ((mb_y << 4) *
ctx->m.uvlinesize) + (mb_x << bs +
ctx->is_444);
722 const uint8_t *ptr_v =
ctx->thread[0]->src[2] +
723 ((mb_y << 4) *
ctx->m.uvlinesize) + (mb_x << bs +
ctx->is_444);
728 (mb_y << 4) + 16 >
ctx->m.avctx->height)) {
729 int y_w =
ctx->m.avctx->width - (mb_x << 4);
730 int y_h =
ctx->m.avctx->height - (mb_y << 4);
731 int uv_w = (y_w + 1) / 2;
737 linesize,
ctx->m.linesize,
741 uvlinesize,
ctx->m.uvlinesize,
745 uvlinesize,
ctx->m.uvlinesize,
749 dct_y_offset = bw * linesize;
750 dct_uv_offset = bw * uvlinesize;
751 ptr_y = &
ctx->edge_buf_y[0];
752 ptr_u = &
ctx->edge_buf_uv[0][0];
753 ptr_v = &
ctx->edge_buf_uv[1][0];
755 (mb_y << 4) + 16 >
ctx->m.avctx->height)) {
756 int y_w =
ctx->m.avctx->width - (mb_x << 4);
757 int y_h =
ctx->m.avctx->height - (mb_y << 4);
758 int uv_w =
ctx->is_444 ? y_w : (y_w + 1) / 2;
761 uvlinesize = 16 + 16 *
ctx->is_444;
764 linesize,
ctx->m.linesize,
768 uvlinesize,
ctx->m.uvlinesize,
772 uvlinesize,
ctx->m.uvlinesize,
776 dct_y_offset = bw * linesize / 2;
777 dct_uv_offset = bw * uvlinesize / 2;
778 ptr_y = &
ctx->edge_buf_y[0];
779 ptr_u = &
ctx->edge_buf_uv[0][0];
780 ptr_v = &
ctx->edge_buf_uv[1][0];
789 if (mb_y + 1 ==
ctx->m.mb_height &&
ctx->m.avctx->height == 1080) {
790 if (
ctx->interlaced) {
791 ctx->get_pixels_8x4_sym(
ctx->blocks[4],
792 ptr_y + dct_y_offset,
794 ctx->get_pixels_8x4_sym(
ctx->blocks[5],
795 ptr_y + dct_y_offset + bw,
797 ctx->get_pixels_8x4_sym(
ctx->blocks[6],
798 ptr_u + dct_uv_offset,
800 ctx->get_pixels_8x4_sym(
ctx->blocks[7],
801 ptr_v + dct_uv_offset,
804 ctx->bdsp.clear_block(
ctx->blocks[4]);
805 ctx->bdsp.clear_block(
ctx->blocks[5]);
806 ctx->bdsp.clear_block(
ctx->blocks[6]);
807 ctx->bdsp.clear_block(
ctx->blocks[7]);
811 ptr_y + dct_y_offset, linesize);
813 ptr_y + dct_y_offset + bw, linesize);
815 ptr_u + dct_uv_offset, uvlinesize);
817 ptr_v + dct_uv_offset, uvlinesize);
822 pdsp->
get_pixels(
ctx->blocks[6], ptr_y + dct_y_offset, linesize);
823 pdsp->
get_pixels(
ctx->blocks[7], ptr_y + dct_y_offset + bw, linesize);
827 pdsp->
get_pixels(
ctx->blocks[8], ptr_u + dct_uv_offset, uvlinesize);
828 pdsp->
get_pixels(
ctx->blocks[9], ptr_u + dct_uv_offset + bw, uvlinesize);
832 pdsp->
get_pixels(
ctx->blocks[10], ptr_v + dct_uv_offset, uvlinesize);
833 pdsp->
get_pixels(
ctx->blocks[11], ptr_v + dct_uv_offset + bw, uvlinesize);
845 const static uint8_t component[8]={0,0,1,2,0,0,1,2};
852 int jobnr,
int threadnr)
855 int mb_y = jobnr, mb_x;
856 int qscale =
ctx->qscale;
858 ctx =
ctx->thread[threadnr];
862 ctx->m.last_dc[2] = 1 << (
ctx->bit_depth + 2);
864 for (mb_x = 0; mb_x <
ctx->m.mb_width; mb_x++) {
865 unsigned mb = mb_y *
ctx->m.mb_width + mb_x;
873 for (
i = 0;
i < 8 + 4 *
ctx->is_444;
i++) {
874 int16_t *src_block =
ctx->blocks[
i];
878 memcpy(
block, src_block, 64 *
sizeof(*
block));
880 ctx->is_444 ? 4 * (n > 0): 4 & (2*
i),
891 dc_bits +=
ctx->cid_table->dc_bits[nbits] + nbits;
901 ctx->mb_rc[(qscale *
ctx->m.mb_num) +
mb].ssd = ssd;
902 ctx->mb_rc[(qscale *
ctx->m.mb_num) +
mb].
bits = ac_bits + dc_bits + 12 +
903 (1 +
ctx->is_444) * 8 *
ctx->vlc_bits[0];
909 int jobnr,
int threadnr)
912 int mb_y = jobnr, mb_x;
913 ctx =
ctx->thread[threadnr];
915 ctx->slice_size[jobnr]);
919 ctx->m.last_dc[2] = 1 << (
ctx->bit_depth + 2);
920 for (mb_x = 0; mb_x <
ctx->m.mb_width; mb_x++) {
921 unsigned mb = mb_y *
ctx->m.mb_width + mb_x;
922 int qscale =
ctx->mb_qscale[
mb];
930 for (
i = 0;
i < 8 + 4 *
ctx->is_444;
i++) {
933 int last_index =
ctx->m.dct_quantize(&
ctx->m,
block,
934 ctx->is_444 ? (((
i >> 1) % 3) < 1 ? 0 : 4): 4 & (2*
i),
950 for (mb_y = 0; mb_y <
ctx->m.mb_height; mb_y++) {
953 ctx->slice_size[mb_y] = 0;
954 for (mb_x = 0; mb_x <
ctx->m.mb_width; mb_x++) {
955 unsigned mb = mb_y *
ctx->m.mb_width + mb_x;
956 ctx->slice_size[mb_y] +=
ctx->mb_bits[
mb];
958 ctx->slice_size[mb_y] = (
ctx->slice_size[mb_y] + 31) & ~31;
959 ctx->slice_size[mb_y] >>= 3;
960 thread_size =
ctx->slice_size[mb_y];
966 int jobnr,
int threadnr)
969 int mb_y = jobnr, mb_x, x, y;
970 int partial_last_row = (mb_y ==
ctx->m.mb_height - 1) &&
973 ctx =
ctx->thread[threadnr];
974 if (
ctx->bit_depth == 8) {
975 uint8_t *pix =
ctx->thread[0]->src[0] + ((mb_y << 4) *
ctx->m.linesize);
976 for (mb_x = 0; mb_x <
ctx->m.mb_width; ++mb_x, pix += 16) {
977 unsigned mb = mb_y *
ctx->m.mb_width + mb_x;
982 sum =
ctx->m.mpvencdsp.pix_sum(pix,
ctx->m.linesize);
983 varc =
ctx->m.mpvencdsp.pix_norm1(pix,
ctx->m.linesize);
988 for (y = 0; y < bh; y++) {
989 for (x = 0; x < bw; x++) {
996 varc = (varc - (((unsigned) sum * sum) >> 8) + 128) >> 8;
998 ctx->mb_cmp[
mb].value = varc;
1002 const int linesize =
ctx->m.linesize >> 1;
1003 for (mb_x = 0; mb_x <
ctx->m.mb_width; ++mb_x) {
1004 uint16_t *pix = (uint16_t *)
ctx->thread[0]->src[0] +
1005 ((mb_y << 4) * linesize) + (mb_x << 4);
1006 unsigned mb = mb_y *
ctx->m.mb_width + mb_x;
1014 for (
i = 0;
i < bh; ++
i) {
1015 for (j = 0; j < bw; ++j) {
1017 const int sample = (unsigned) pix[j] >> 6;
1025 sqmean = sqsum >> 8;
1026 ctx->mb_cmp[
mb].value = sqmean - mean * mean;
1035 int lambda, up_step, down_step;
1036 int last_lower = INT_MAX, last_higher = 0;
1045 lambda =
ctx->lambda;
1050 if (lambda == last_higher) {
1054 for (y = 0; y <
ctx->m.mb_height; y++) {
1055 for (x = 0; x <
ctx->m.mb_width; x++) {
1056 unsigned min = UINT_MAX;
1058 int mb = y *
ctx->m.mb_width + x;
1061 int i = (q*
ctx->m.mb_num) +
mb;
1062 unsigned score =
ctx->mb_rc[
i].bits * lambda +
1071 ctx->mb_qscale[
mb] = qscale;
1072 ctx->mb_bits[
mb] =
ctx->mb_rc[rc].bits;
1083 if (bits < ctx->frame_bits) {
1084 last_lower =
FFMIN(lambda, last_lower);
1085 if (last_higher != 0)
1086 lambda = (lambda+last_higher)>>1;
1088 lambda -= down_step;
1089 down_step =
FFMIN((int64_t)down_step*5, INT_MAX);
1091 lambda =
FFMAX(1, lambda);
1092 if (lambda == last_lower)
1095 last_higher =
FFMAX(lambda, last_higher);
1096 if (last_lower != INT_MAX)
1097 lambda = (lambda+last_lower)>>1;
1098 else if ((int64_t)lambda + up_step > INT_MAX)
1102 up_step =
FFMIN((int64_t)up_step*5, INT_MAX);
1106 ctx->lambda = lambda;
1115 int last_higher = 0;
1116 int last_lower = INT_MAX;
1120 qscale =
ctx->qscale;
1123 ctx->qscale = qscale;
1127 for (y = 0; y <
ctx->m.mb_height; y++) {
1128 for (x = 0; x <
ctx->m.mb_width; x++)
1134 if (bits < ctx->frame_bits) {
1137 if (last_higher == qscale - 1) {
1138 qscale = last_higher;
1141 last_lower =
FFMIN(qscale, last_lower);
1142 if (last_higher != 0)
1143 qscale = (qscale + last_higher) >> 1;
1145 qscale -= down_step++;
1150 if (last_lower == qscale + 1)
1152 last_higher =
FFMAX(qscale, last_higher);
1153 if (last_lower != INT_MAX)
1154 qscale = (qscale + last_lower) >> 1;
1156 qscale += up_step++;
1158 if (qscale >=
ctx->m.avctx->qmax)
1162 ctx->qscale = qscale;
1166 #define BUCKET_BITS 8
1167 #define RADIX_PASSES 4
1168 #define NBUCKETS (1 << BUCKET_BITS)
1183 int v =
data[
i].value;
1193 buckets[j][
i] =
offset -= buckets[j][
i];
1205 int pos = buckets[v]++;
1228 for (y = 0; y <
ctx->m.mb_height; y++) {
1229 for (x = 0; x <
ctx->m.mb_width; x++) {
1230 int mb = y *
ctx->m.mb_width + x;
1231 int rc = (
ctx->qscale *
ctx->m.mb_num ) +
mb;
1234 ctx->mb_bits[
mb] =
ctx->mb_rc[rc].bits;
1235 max_bits +=
ctx->mb_rc[rc].bits;
1237 delta_bits =
ctx->mb_rc[rc].bits -
1238 ctx->mb_rc[rc +
ctx->m.mb_num].bits;
1240 ctx->mb_cmp[
mb].value =
1241 delta_bits ? ((
ctx->mb_rc[rc].ssd -
1242 ctx->mb_rc[rc +
ctx->m.mb_num].ssd) * 100) /
1254 for (x = 0; x <
ctx->m.mb_num && max_bits >
ctx->frame_bits; x++) {
1255 int mb =
ctx->mb_cmp[x].mb;
1256 int rc = (
ctx->qscale *
ctx->m.mb_num ) +
mb;
1257 max_bits -=
ctx->mb_rc[rc].bits -
1258 ctx->mb_rc[rc +
ctx->m.mb_num].bits;
1259 ctx->mb_qscale[
mb] =
ctx->qscale + 1;
1260 ctx->mb_bits[
mb] =
ctx->mb_rc[rc +
ctx->m.mb_num].bits;
1270 for (
i = 0;
i <
ctx->m.avctx->thread_count;
i++) {
1271 ctx->thread[
i]->m.linesize =
frame->linesize[0] <<
ctx->interlaced;
1272 ctx->thread[
i]->m.uvlinesize =
frame->linesize[1] <<
ctx->interlaced;
1273 ctx->thread[
i]->dct_y_offset =
ctx->m.linesize *8;
1274 ctx->thread[
i]->dct_uv_offset =
ctx->m.uvlinesize*8;
1277 #if FF_API_CODED_FRAME
1279 ctx->m.avctx->coded_frame->interlaced_frame =
frame->interlaced_frame;
1282 ctx->cur_field =
frame->interlaced_frame && !
frame->top_field_first;
1300 for (
i = 0;
i < 3;
i++) {
1302 if (
ctx->interlaced &&
ctx->cur_field)
1314 "picture could not fit ratecontrol constraints, increase qmax\n");
1321 for (
i = 0;
i <
ctx->m.mb_height;
i++) {
1331 ctx->coding_unit_size - 4 -
offset -
ctx->data_offset);
1337 ctx->cur_field ^= 1;
1338 buf +=
ctx->coding_unit_size;
1339 goto encode_coding_unit;
1342 #if FF_API_CODED_FRAME
static av_cold int dnxhd_encode_init(AVCodecContext *avctx)
#define FF_ENABLE_DEPRECATION_WARNINGS
static void bit_depth(AudioStatsContext *s, uint64_t mask, uint64_t imask, AVRational *depth)
static const AVOption options[]
AVPixelFormat
Pixel format.
static av_cold int init(AVCodecContext *avctx)
The word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format negotiation: for each input and each output, the filter lists the supported formats. For video that means pixel format; for audio that means channel layout and sample format. These lists are references to shared objects. When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions
static av_always_inline void dnxhd_encode_block(DNXHDEncContext *ctx, int16_t *block, int last_index, int n)
static av_cold int dnxhd_init_rc(DNXHDEncContext *ctx)
static av_always_inline int dnxhd_calc_ac_bits(DNXHDEncContext *ctx, int16_t *block, int last_index)
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
#define MKTAG(a, b, c, d)
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
static int dnxhd_10bit_dct_quantize_444(MpegEncContext *ctx, int16_t *block, int n, int qscale, int *overflow)
static int dnxhd_encode_fast(AVCodecContext *avctx, DNXHDEncContext *ctx)
static av_cold int end(AVCodecContext *avctx)
int av_log2_16bit(unsigned v)
static void dnxhd_8bit_get_pixels_8x4_sym(int16_t *av_restrict block, const uint8_t *pixels, ptrdiff_t line_size)
This structure describes decoded (raw) audio or video data.
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
#define DNX10BIT_QMAT_SHIFT
#define MASK_ABS(mask, level)
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
static int dnxhd_write_header(AVCodecContext *avctx, uint8_t *buf)
static int dnxhd_encode_rdo(AVCodecContext *avctx, DNXHDEncContext *ctx)
int mb_decision
macroblock decision mode
int qmax
maximum quantizer
static av_cold int dnxhd_encode_end(AVCodecContext *avctx)
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
static av_always_inline void dnxhd_encode_dc(DNXHDEncContext *ctx, int diff)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
#define AV_PIX_FMT_GBRP10
void(* get_pixels)(int16_t *av_restrict block, const uint8_t *pixels, ptrdiff_t stride)
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
void ff_dnxhd_print_profiles(AVCodecContext *avctx, int loglevel)
int key_frame
1 -> keyframe, 0-> not
int flags
AV_CODEC_FLAG_*.
static double val(void *priv, double ch)
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
static int dnxhd_encode_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
int avpriv_dnxhd_get_hr_frame_size(int cid, int w, int h)
#define FF_PROFILE_DNXHR_LB
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
#define AV_PIX_FMT_YUV444P10
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
static int dnxhd_mb_var_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static enum AVPixelFormat pix_fmts[]
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
static void dnxhd_load_picture(DNXHDEncContext *ctx, const AVFrame *frame)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static void radix_sort(RCCMPEntry *data, RCCMPEntry *tmp, int size)
const AVProfile ff_dnxhd_profiles[]
static av_cold int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
#define FF_PROFILE_DNXHR_HQ
const CIDEntry ff_dnxhd_cid_table[]
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
av_cold void ff_mpv_idct_init(MpegEncContext *s)
static void radix_sort_pass(RCCMPEntry *dst, const RCCMPEntry *data, int size, int buckets[NBUCKETS], int pass)
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
#define DNXHD_VARIABLE
Indicate that a CIDEntry value must be read in the bitstream.
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PICTURE_TYPE_I
Intra.
AVIOContext * pb
I/O context.
static void radix_count(const RCCMPEntry *data, int size, int buckets[RADIX_PASSES][NBUCKETS])
static const AVClass dnxhd_class
#define AV_PIX_FMT_YUV422P10
int ff_dnxhd_get_cid_table(int cid)
static void dnxhd_setup_threads_slices(DNXHDEncContext *ctx)
static int dnxhd_10bit_dct_quantize(MpegEncContext *ctx, int16_t *block, int n, int qscale, int *overflow)
static int dnxhd_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet)
enum AVPictureType pict_type
Picture type of the frame.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
void ff_dnxhdenc_init_x86(DNXHDEncContext *ctx)
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
static av_cold int dnxhd_init_vlc(DNXHDEncContext *ctx)
static av_always_inline int dnxhd_ssd_block(int16_t *qblock, int16_t *block)
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
it's the only field you need to keep, assuming you have a context; there is some magic you don't need to care about around this — just let it be. vf offset
int flags
A combination of AV_PKT_FLAG values.
static int dnxhd_find_qscale(DNXHDEncContext *ctx)
#define FF_PROFILE_DNXHR_SQ
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples.
#define i(width, name, range_min, range_max)
static int put_bits_count(PutBitContext *s)
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
it's the only field you need to keep, assuming you have a context; there is some magic you don't need to care about around this — just let it be. vf default value
const char * name
Name of the codec implementation.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
const uint8_t ff_zigzag_direct[64]
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
These buffered frames must be flushed immediately if a new input produces new output; in that case the filter must not call request_frame to get more — it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed. If the filter should request a frame on one of its inputs, it should do so repeatedly until at least one frame has been pushed. Return 0, or at least make progress towards producing a frame.
static int get_bucket(int value, int shift)
int ff_dct_encode_init(MpegEncContext *s)
attribute_deprecated AVFrame * coded_frame
the picture in the bitstream
main external API structure.
int active_thread_type
Which multithreading methods are in use by the codec.
static av_always_inline void dnxhd_get_blocks(DNXHDEncContext *ctx, int mb_x, int mb_y)
int ff_dnxhd_find_cid(AVCodecContext *avctx, int bit_depth)
static av_always_inline int dnxhd_switch_matrix(DNXHDEncContext *ctx, int i)
#define FF_PROFILE_DNXHR_HQX
#define FF_MB_DECISION_RD
rate distortion
static int shift(int a, int b)
#define FF_DISABLE_DEPRECATION_WARNINGS
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Undefined behavior: in the C language, some operations are undefined, such as signed integer overflow.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
static av_always_inline int diff(const uint32_t a, const uint32_t b)
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
#define LOCAL_ALIGNED_16(t, v,...)
This structure stores compressed data.
#define FF_PROFILE_DNXHR_444
int width
picture width / height.
The exact code depends on how similar the blocks are and on how related they are to the current block.
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
static int dnxhd_calc_bits_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
static int first_field(const struct video_data *s)
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
static av_always_inline void dnxhd_10bit_get_pixels_8x4_sym(int16_t *av_restrict block, const uint8_t *pixels, ptrdiff_t line_size)
#define FF_ALLOCZ_ARRAY_OR_GOTO(ctx, p, nelem, elsize, label)
static av_always_inline void dnxhd_unquantize_c(DNXHDEncContext *ctx, int16_t *block, int n, int qscale, int last_index)
static const AVCodecDefault dnxhd_defaults[]