#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
#define OFFSET(x) offsetof(XPSNRContext, x)
/* spatial activity: downsampling high-pass filter, used for pictures above ~HD size */
static uint64_t highds(const int x_act, const int y_act, const int w_act, const int h_act,
                       const int16_t *o_m0, const int o)
{
    uint64_t sa_act = 0;

    for (int y = y_act; y < h_act; y += 2) {
        for (int x = x_act; x < w_act; x += 2) {
            const int f = 12 * ((int)o_m0[ y   *o + x  ] + (int)o_m0[ y   *o + x+1] + (int)o_m0[(y+1)*o + x  ] + (int)o_m0[(y+1)*o + x+1])
                         - 3 * ((int)o_m0[(y-1)*o + x  ] + (int)o_m0[(y-1)*o + x+1] + (int)o_m0[(y+2)*o + x  ] + (int)o_m0[(y+2)*o + x+1])
                         - 3 * ((int)o_m0[ y   *o + x-1] + (int)o_m0[ y   *o + x+2] + (int)o_m0[(y+1)*o + x-1] + (int)o_m0[(y+1)*o + x+2])
                         - 2 * ((int)o_m0[(y-1)*o + x-1] + (int)o_m0[(y-1)*o + x+2] + (int)o_m0[(y+2)*o + x-1] + (int)o_m0[(y+2)*o + x+2])
                             - ((int)o_m0[(y-2)*o + x-1] + (int)o_m0[(y-2)*o + x  ] + (int)o_m0[(y-2)*o + x+1] + (int)o_m0[(y-2)*o + x+2]
                              + (int)o_m0[(y+3)*o + x-1] + (int)o_m0[(y+3)*o + x  ] + (int)o_m0[(y+3)*o + x+1] + (int)o_m0[(y+3)*o + x+2]
                              + (int)o_m0[(y-1)*o + x-2] + (int)o_m0[ y   *o + x-2] + (int)o_m0[(y+1)*o + x-2] + (int)o_m0[(y+2)*o + x-2]
                              + (int)o_m0[(y-1)*o + x+3] + (int)o_m0[ y   *o + x+3] + (int)o_m0[(y+1)*o + x+3] + (int)o_m0[(y+2)*o + x+3]);
            sa_act += (uint64_t) abs(f);
        }
    }
    return sa_act;
}
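/* Illustration (not part of the original file): the kernel above sums to zero
 * per tap position (12*4 - 3*4 - 3*4 - 2*4 - 16 = 0), so a flat block has no
 * spatial activity. A minimal self-test sketch using only highds() above: */
static void highds_flat_block_sketch(void)
{
    int16_t flat[16 * 16];
    uint64_t sa;

    for (int i = 0; i < 16 * 16; i++)
        flat[i] = 100; /* constant 16x16 luma patch */

    /* evaluate the inner area; the kernel needs a 2..3 pixel border */
    sa = highds(2, 2, 14, 14, flat, 16);
    /* sa == 0 here: no high-frequency content, hence maximum visual weight
     * (before the lower bound on ms_act is applied further below) */
    (void) sa;
}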
/* temporal activity: 1st-order frame difference on a 2x2-subsampled grid */
static uint64_t diff1st(const uint32_t w_act, const uint32_t h_act,
                        const int16_t *o_m0, int16_t *o_m1, const int o)
{
    uint64_t ta_act = 0;

    for (uint32_t y = 0; y < h_act; y += 2) {
        for (uint32_t x = 0; x < w_act; x += 2) {
            const int t = (int)o_m0[y*o + x] + (int)o_m0[y*o + x+1] + (int)o_m0[(y+1)*o + x] + (int)o_m0[(y+1)*o + x+1]
                       - ((int)o_m1[y*o + x] + (int)o_m1[y*o + x+1] + (int)o_m1[(y+1)*o + x] + (int)o_m1[(y+1)*o + x+1]);
            ta_act += (uint64_t) abs(t);
            /* update the one-frame-delayed copy of the original picture */
            o_m1[y*o + x  ] = o_m0[y*o + x  ];   o_m1[(y+1)*o + x  ] = o_m0[(y+1)*o + x  ];
            o_m1[y*o + x+1] = o_m0[y*o + x+1];   o_m1[(y+1)*o + x+1] = o_m0[(y+1)*o + x+1];
        }
    }
    return ta_act;
}
/* temporal activity: 2nd-order difference (difference of differences) on a 2x2-subsampled grid */
static uint64_t diff2nd(const uint32_t w_act, const uint32_t h_act,
                        const int16_t *o_m0, int16_t *o_m1, int16_t *o_m2, const int o)
{
    uint64_t ta_act = 0;

    for (uint32_t y = 0; y < h_act; y += 2) {
        for (uint32_t x = 0; x < w_act; x += 2) {
            const int t = (int)o_m0[y*o + x] + (int)o_m0[y*o + x+1] + (int)o_m0[(y+1)*o + x] + (int)o_m0[(y+1)*o + x+1]
                  - 2 * ((int)o_m1[y*o + x] + (int)o_m1[y*o + x+1] + (int)o_m1[(y+1)*o + x] + (int)o_m1[(y+1)*o + x+1])
                      + (int)o_m2[y*o + x] + (int)o_m2[y*o + x+1] + (int)o_m2[(y+1)*o + x] + (int)o_m2[(y+1)*o + x+1];
            ta_act += (uint64_t) abs(t);
            /* shift the delay line: two-frames-ago <- one-frame-ago <- current */
            o_m2[y*o + x  ] = o_m1[y*o + x  ];   o_m2[(y+1)*o + x  ] = o_m1[(y+1)*o + x  ];
            o_m2[y*o + x+1] = o_m1[y*o + x+1];   o_m2[(y+1)*o + x+1] = o_m1[(y+1)*o + x+1];
            o_m1[y*o + x  ] = o_m0[y*o + x  ];   o_m1[(y+1)*o + x  ] = o_m0[(y+1)*o + x  ];
            o_m1[y*o + x+1] = o_m0[y*o + x+1];   o_m1[(y+1)*o + x+1] = o_m0[(y+1)*o + x+1];
        }
    }
    return ta_act;
}
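/* Illustration (not part of the original file): for a static scene the frame
 * differences vanish, so the temporal part of the activity is zero and the
 * block weight is driven by spatial activity alone. A minimal sketch using
 * the diff1st()/diff2nd() routines above: */
static void temporal_static_scene_sketch(void)
{
    int16_t cur[8 * 8], prev[8 * 8], prev2[8 * 8];
    uint64_t ta1, ta2;

    for (int i = 0; i < 8 * 8; i++)
        cur[i] = prev[i] = prev2[i] = 64; /* three identical 8x8 frames */

    ta1 = diff1st(8, 8, cur, prev, 8);        /* 1st-order difference: 0 */
    ta2 = diff2nd(8, 8, cur, prev, prev2, 8); /* 2nd-order difference: 0 */
    (void) ta1; (void) ta2;
}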
/* per-line sum of squared errors for high bit-depth (16-bit container) input */
static uint64_t sse_line_16bit(const uint8_t *blk_org8, const uint8_t *blk_rec8, int block_width)
{
    const uint16_t *blk_org = (const uint16_t *) blk_org8;
    const uint16_t *blk_rec = (const uint16_t *) blk_rec8;
    uint64_t sse = 0;

    for (int x = 0; x < block_width; x++) {
        const int64_t error = (int64_t) blk_org[x] - (int64_t) blk_rec[x];

        sse += (uint64_t) (error * error);
    }
    return sse;
}
static uint64_t calc_squared_error(XPSNRContext const *s,
                                   const int16_t *blk_org, const uint32_t stride_org,
                                   const int16_t *blk_rec, const uint32_t stride_rec,
                                   const uint32_t block_width, const uint32_t block_height)
{
    uint64_t sse = 0; /* sum of squared errors of the block */

    for (uint32_t y = 0; y < block_height; y++) {
        sse += s->dsp.sse_line((const uint8_t *) blk_org, (const uint8_t *) blk_rec, (int) block_width);
        blk_org += stride_org;
        blk_rec += stride_rec;
    }
    return sse;
}
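/* Worked example (illustration only): for one 4-sample line with original
 * samples {10, 20, 30, 40} and reconstructed samples {12, 18, 30, 44}, the
 * per-line SSE is (-2)^2 + 2^2 + 0^2 + (-4)^2 = 24; calc_squared_error()
 * accumulates such line sums over all block_height lines of the block. */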
/* per-block squared error plus the block's spatio-temporal activity (visual masking) */
static double calc_squared_error_and_weight(XPSNRContext const *s,
                                            const int16_t *pic_org,     const uint32_t stride_org,
                                            int16_t *pic_org_m1,        int16_t *pic_org_m2,
                                            const int16_t *pic_rec,     const uint32_t stride_rec,
                                            const uint32_t offset_x,    const uint32_t offset_y,
                                            const uint32_t block_width, const uint32_t block_height,
                                            const uint32_t bit_depth,   const uint32_t int_frame_rate,
                                            double *ms_act)
{
    const int o = (int) stride_org;
    const int r = (int) stride_rec;
    const int16_t *o_m0 = pic_org    + offset_y * o + offset_x; /* current original block */
    int16_t       *o_m1 = pic_org_m1 + offset_y * o + offset_x; /* original, one frame ago */
    int16_t       *o_m2 = pic_org_m2 + offset_y * o + offset_x; /* original, two frames ago */
    const int16_t *r_m0 = pic_rec    + offset_y * r + offset_x; /* reconstructed block */
    const int b_val = (s->plane_width[0] * s->plane_height[0] > 2048 * 1152 ? 2 : 1); /* downsample above ~HD size */
    const int x_act = (offset_x > 0 ? 0 : b_val);
    const int y_act = (offset_y > 0 ? 0 : b_val);
    const int w_act = (offset_x + block_width  < (uint32_t) s->plane_width [0] ? (int) block_width  : (int) block_width  - b_val);
    const int h_act = (offset_y + block_height < (uint32_t) s->plane_height[0] ? (int) block_height : (int) block_height - b_val);
    const double sse = (double) calc_squared_error(s, o_m0, stride_org,
                                                   r_m0, stride_rec,
                                                   block_width, block_height);
    uint64_t sa_act = 0; /* spatial abs. activity sum */
    uint64_t ta_act = 0; /* temporal abs. activity sum */

    if (w_act <= x_act || h_act <= y_act) /* too tiny for the high-pass filter */
        return sse;

    /* spatial activity: high-pass filter the original block */
    if (b_val > 1) { /* with downsampling; the DSP hook may be an asm version, highds() is the C code */
        sa_act = s->dsp.highds_func ? s->dsp.highds_func(x_act, y_act, w_act, h_act, o_m0, o)
                                    : highds(x_act, y_act, w_act, h_act, o_m0, o);
    } else { /* without downsampling */
        for (int y = y_act; y < h_act; y++) {
            for (int x = x_act; x < w_act; x++) {
                const int f = 12 * (int)o_m0[y*o + x]
                             - 2 * ((int)o_m0[y*o + x-1] + (int)o_m0[y*o + x+1] + (int)o_m0[(y-1)*o + x] + (int)o_m0[(y+1)*o + x])
                                 - ((int)o_m0[(y-1)*o + x-1] + (int)o_m0[(y-1)*o + x+1] + (int)o_m0[(y+1)*o + x-1] + (int)o_m0[(y+1)*o + x+1]);
                sa_act += (uint64_t) abs(f);
            }
        }
    }

    /* mean spatial activity of the block */
    *ms_act = (double) sa_act / ((double) (w_act - x_act) * (double) (h_act - y_act));

    /* temporal activity: frame difference of the original pictures */
    if (b_val > 1) { /* subsampled, via the DSP hooks */
        if (int_frame_rate < 32) /* 1st-order difference at low frame rates */
            ta_act = s->dsp.diff1st_func(block_width, block_height, o_m0, o_m1, o);
        else                     /* 2nd-order difference (difference of differences) */
            ta_act = s->dsp.diff2nd_func(block_width, block_height, o_m0, o_m1, o_m2, o);
    } else { /* full resolution */
        if (int_frame_rate < 32) { /* 1st-order difference */
            for (uint32_t y = 0; y < block_height; y++) {
                for (uint32_t x = 0; x < block_width; x++) {
                    const int t = (int)o_m0[y * o + x] - (int)o_m1[y * o + x];

                    ta_act += (uint64_t) abs(t);
                    o_m1[y * o + x] = o_m0[y * o + x];
                }
            }
        } else { /* 2nd-order difference */
            for (uint32_t y = 0; y < block_height; y++) {
                for (uint32_t x = 0; x < block_width; x++) {
                    const int t = (int)o_m0[y * o + x] - 2 * (int)o_m1[y * o + x] + (int)o_m2[y * o + x];

                    ta_act += (uint64_t) abs(t);
                    o_m2[y * o + x] = o_m1[y * o + x];
                    o_m1[y * o + x] = o_m0[y * o + x];
                }
            }
        }
    }

    /* add the mean temporal activity and apply the lower bound */
    *ms_act += (double) ta_act / ((double) block_width * (double) block_height);

    if (*ms_act < (double) (1 << (bit_depth - 6)))
        *ms_act = (double) (1 << (bit_depth - 6));

    return sse;
}
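/* Worked example (illustration only): the activity floor above is
 * 1 << (bit_depth - 6), i.e. 4 for 8-bit and 16 for 10-bit input, so nearly
 * flat blocks cannot receive an arbitrarily large visual weight. */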
static inline double get_avg_xpsnr(const double sqrt_wsse_val, const double sum_xpsnr_val,
                                   const uint32_t image_width, const uint32_t image_height,
                                   const uint64_t max_error_64, const uint64_t num_frames_64)
{
    if (num_frames_64 == 0)
        return INFINITY;

    if (sqrt_wsse_val >= (double) num_frames_64) { /* nonzero distortion: average in the WSSE domain */
        const double avg_dist = sqrt_wsse_val / (double) num_frames_64;
        const uint64_t num64  = (uint64_t) image_width * (uint64_t) image_height * max_error_64;

        return 10.0 * log10((double) num64 / ((double) avg_dist * (double) avg_dist));
    }

    return sum_xpsnr_val / (double) num_frames_64; /* average of the per-frame XPSNR values */
}
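/* Worked example (illustration only): for 8-bit 1920x1080 video,
 * max_error_64 = 255 * 255 = 65025. If the per-frame sqrt(WSSE) values
 * average out to avg_dist = 1000, the reported average XPSNR is
 * 10 * log10(1920 * 1080 * 65025 / (1000 * 1000)) ~= 51.3 dB. */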
static int get_wsse(AVFilterContext *ctx, int16_t **org, int16_t **org_m1, int16_t **org_m2,
                    int16_t **rec, uint64_t *const wsse64)
{
    XPSNRContext *const s = ctx->priv;
    const uint32_t w = s->plane_width [0]; /* luma plane width */
    const uint32_t h = s->plane_height[0]; /* luma plane height */
    const double   r = (double)(w * h) / (3840.0 * 2160.0); /* picture-size ratio relative to UHD */
    const uint32_t b = FFMAX(0, 4 * (int32_t) (32.0 * sqrt(r) + 0.5)); /* block size */
    const uint32_t w_blk = (w + b - 1) / b; /* luma plane width in blocks */
    const double avg_act = sqrt(16.0 * (double) (1 << (2 * s->depth - 9)) / sqrt(FFMAX(0.00001, r))); /* expected average activity */
    const int *stride_org = (s->bpp == 1 ? s->plane_width : s->line_sizes);
    uint32_t x, y, idx_blk = 0;
    double *const sse_luma = s->sse_luma;
    double *const weights  = s->weights;
    int c;

    if (!wsse64 || (s->depth < 6) || (s->depth > 16) || (s->num_comps <= 0) ||
        (s->num_comps > 3) || (w == 0) || (h == 0)) {
        return AVERROR(EINVAL); /* invalid state or arguments */
    }
    if (!weights || (b >= 4 && !sse_luma)) { /* temporary block memory is missing */
        return AVERROR(ENOMEM);
    }

    if (b >= 4) { /* luma: compute per-block SSE values and perceptual weights */
        const int16_t *p_org = org[0];
        const uint32_t s_org = stride_org[0] / s->bpp;
        const int16_t *p_rec = rec[0];
        const uint32_t s_rec = s->plane_width[0];
        int16_t *p_org_m1 = org_m1[0];
        int16_t *p_org_m2 = org_m2[0];
        double wsse_luma = 0.0;

        for (y = 0; y < h; y += b) { /* calculate the block SSE and weights */
            const uint32_t block_height = (y + b > h ? h - y : b);

            for (x = 0; x < w; x += b, idx_blk++) {
                const uint32_t block_width = (x + b > w ? w - x : b);
                double ms_act = 1.0, ms_act_prev = 0.0;

                sse_luma[idx_blk] = calc_squared_error_and_weight(s, p_org, s_org,
                                                                  p_org_m1, p_org_m2,
                                                                  p_rec, s_rec, x, y,
                                                                  block_width, block_height,
                                                                  s->depth, s->frame_rate, &ms_act);
                weights[idx_blk] = 1.0 / sqrt(ms_act);

                if (w * h <= 640 * 480) { /* in-line weight smoothing at low resolutions */
                    /* (some neighbor-selection details are elided in this listing) */
                    ms_act_prev = (idx_blk > 1 ? weights[idx_blk - 2] : 0);
                    if (idx_blk > w_blk) /* a block row above exists */
                        ms_act_prev = FFMAX(ms_act_prev, weights[idx_blk - 1 - w_blk]);
                    if ((idx_blk > 0) && (weights[idx_blk - 1] > ms_act_prev))
                        weights[idx_blk - 1] = ms_act_prev;
                    if ((x + b >= w) && (y + b >= h) && (idx_blk > w_blk)) { /* last block */
                        if (weights[idx_blk] > ms_act_prev)
                            weights[idx_blk] = ms_act_prev;
                    }
                }
            }
        }

        for (y = idx_blk = 0; y < h; y += b) { /* calculate the weighted sum of SSE values */
            for (x = 0; x < w; x += b, idx_blk++) {
                wsse_luma += sse_luma[idx_blk] * weights[idx_blk];
            }
        }
        wsse64[0] = (wsse_luma <= 0.0 ? 0 : (uint64_t) (wsse_luma * avg_act + 0.5));
    }

    for (c = 0; c < s->num_comps; c++) { /* finalize the WSSE value of every component */
        const int16_t *p_org = org[c];
        const uint32_t s_org = stride_org[c] / s->bpp;
        const int16_t *p_rec = rec[c];
        const uint32_t s_rec = s->plane_width [c];
        const uint32_t w_pln = s->plane_width [c];
        const uint32_t h_pln = s->plane_height[c];

        if (b < 4) { /* picture too small for the block weighting: plain SSE */
            wsse64[c] = calc_squared_error(s, p_org, s_org, p_rec, s_rec, w_pln, h_pln);
        } else if (c > 0) { /* chroma: reuse the per-block luma weights */
            const uint32_t bx = (b * w_pln) / w; /* chroma block size, horizontally */
            const uint32_t by = (b * h_pln) / h; /* and vertically */
            double wsse_chroma = 0.0;

            for (y = idx_blk = 0; y < h_pln; y += by) {
                const uint32_t block_height = (y + by > h_pln ? h_pln - y : by);

                for (x = 0; x < w_pln; x += bx, idx_blk++) {
                    const uint32_t block_width = (x + bx > w_pln ? w_pln - x : bx);

                    wsse_chroma += (double) calc_squared_error(s, p_org + y * s_org + x, s_org,
                                                               p_rec + y * s_rec + x, s_rec,
                                                               block_width, block_height) * weights[idx_blk];
                }
            }
            wsse64[c] = (wsse_chroma <= 0.0 ? 0 : (uint64_t) (wsse_chroma * avg_act + 0.5));
        }
    }
    return 0;
}
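/* Note (illustration only): each luma block contributes sse_luma[k] *
 * weights[k] with weights[k] = 1 / sqrt(ms_act_k), so visually busy blocks
 * (strong spatio-temporal masking) are down-weighted; the final scaling by
 * avg_act keeps the weighted sum on a scale comparable to a plain SSE. */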
static int do_xpsnr(FFFrameSync *fs)
{
    AVFilterContext  *ctx = fs->parent;
    XPSNRContext *const s = ctx->priv;
    const uint32_t w = s->plane_width [0]; /* luma plane width */
    const uint32_t h = s->plane_height[0]; /* luma plane height */
    const uint32_t b = FFMAX(0, 4 * (int32_t) (32.0 * sqrt((double) (w * h) / (3840.0 * 2160.0)) + 0.5)); /* block size */
    const uint32_t w_blk = (w + b - 1) / b; /* luma plane width in blocks */
    const uint32_t h_blk = (h + b - 1) / b; /* luma plane height in blocks */
    AVFrame *master, *ref = NULL;
    int16_t *porg[3], *porg_m1[3], *porg_m2[3], *prec[3];
    uint64_t wsse64 [3] = {0, 0, 0};
    double cur_xpsnr[3];
    AVDictionary **metadata;
    int c, ret_value;
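    /* Worked example (illustration only): at 1920x1080 the size ratio
     * w*h / (3840*2160) is 0.25, so b = 4 * (int32_t)(32.0 * 0.5 + 0.5) = 64,
     * i.e. 64x64 luma blocks; at 3840x2160 the ratio is 1.0 and b = 128. */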
    if ((ret_value = ff_framesync_dualinput_get(fs, &master, &ref)) < 0)
        return ret_value;
    if (ctx->is_disabled || !ref)
        return ff_filter_frame(ctx->outputs[0], master);
    metadata = &master->metadata;

    /* prepare the XPSNR calculation: lazily allocate the block and picture buffers */
    if (!s->sse_luma)
        s->sse_luma = av_malloc_array(w_blk * h_blk, sizeof(double));
    if (!s->weights)
        s->weights  = av_malloc_array(w_blk * h_blk, sizeof(double));

    for (c = 0; c < s->num_comps; c++) { /* delayed-original buffers for the temporal activity */
        const int stride_org_bpp = (s->bpp == 1 ? s->plane_width[c] : s->line_sizes[c] / s->bpp);

        if (!s->buf_org_m1[c])
            s->buf_org_m1[c] = av_buffer_allocz(stride_org_bpp * s->plane_height[c] * sizeof(int16_t));
        if (!s->buf_org_m2[c])
            s->buf_org_m2[c] = av_buffer_allocz(stride_org_bpp * s->plane_height[c] * sizeof(int16_t));

        porg_m1[c] = (int16_t *) s->buf_org_m1[c]->data;
        porg_m2[c] = (int16_t *) s->buf_org_m2[c]->data;
    }

    if (s->bpp == 1) { /* 8-bit input: convert to 16-bit for the XPSNR routines */
        for (c = 0; c < s->num_comps; c++) {
            const int m = s->line_sizes [c]; /* master stride */
            const int r = ref->linesize [c]; /* reference stride */
            const int o = s->plane_width[c]; /* XPSNR stride */

            /* (the lazy allocation of s->buf_org[c] and s->buf_rec[c] is elided in this listing) */
            porg[c] = (int16_t *) s->buf_org[c]->data;
            prec[c] = (int16_t *) s->buf_rec[c]->data;

            for (int y = 0; y < s->plane_height[c]; y++) {
                for (int x = 0; x < s->plane_width[c]; x++) {
                    porg[c][y * o + x] = (int16_t) master->data[c][y * m + x];
                    prec[c][y * o + x] = (int16_t)    ref->data[c][y * r + x];
                }
            }
        }
    } else { /* 10-, 12- or 14-bit input: use the frame data directly */
        for (c = 0; c < s->num_comps; c++) {
            porg[c] = (int16_t *) master->data[c];
            prec[c] = (int16_t *)    ref->data[c];
        }
    }

    /* extended perceptually weighted peak signal-to-noise ratio (XPSNR) of this frame */
    ret_value = get_wsse(ctx, (int16_t **) &porg, (int16_t **) &porg_m1, (int16_t **) &porg_m2,
                         (int16_t **) &prec, wsse64);
    if (ret_value < 0)
        return ret_value;

    for (c = 0; c < s->num_comps; c++) { /* update the per-component statistics */
        const double sqrt_wsse = sqrt((double) wsse64[c]);

        cur_xpsnr[c] = get_avg_xpsnr(sqrt_wsse, INFINITY,
                                     s->plane_width[c], s->plane_height[c],
                                     s->max_error_64, 1 /* 1 frame */);
        s->sum_wdist [c] += sqrt_wsse;
        s->sum_xpsnr [c] += cur_xpsnr[c];
        s->and_is_inf[c] &= isinf(cur_xpsnr[c]);
    }
    s->num_frames_64++;

    /* with YUV input this publishes metadata keys like "lavfi.xpsnr.xpsnr.y" on the frame */
    for (int j = 0; j < s->num_comps; j++) {
        int c = s->is_rgb ? s->rgba_map[j] : j;
        set_meta(metadata, "lavfi.xpsnr.xpsnr.", s->comps[j], cur_xpsnr[c]);
    }

    if (s->stats_file) { /* print the per-frame values to the statistics file */
        fprintf(s->stats_file, "n: %4"PRId64"", s->num_frames_64);

        for (c = 0; c < s->num_comps; c++)
            fprintf(s->stats_file, " XPSNR %c: %3.4f", s->comps[c], cur_xpsnr[c]);
        fprintf(s->stats_file, "\n");
    }

    return ff_filter_frame(ctx->outputs[0], master);
}
static av_cold int init(AVFilterContext *ctx)
{
    XPSNRContext *const s = ctx->priv;
    int c;

    if (s->stats_file_str) {
        if (!strcmp(s->stats_file_str, "-")) /* print the statistics to stdout */
            s->stats_file = stdout;
        else
            s->stats_file = avpriv_fopen_utf8(s->stats_file_str, "w");

        if (!s->stats_file) {
            const int err = AVERROR(errno);
            char buf[128];

            av_strerror(err, buf, sizeof(buf));
            av_log(ctx, AV_LOG_ERROR, "Could not open statistics file %s: %s\n",
                   s->stats_file_str, buf);
            return err;
        }
    }

    for (c = 0; c < 3; c++) { /* initialize the per-component statistics */
        s->sum_wdist [c] = 0.0;
        s->sum_xpsnr [c] = 0.0;
        s->and_is_inf[c] = 1;
    }

    return 0;
}
#define PF_NOALPHA(suf) AV_PIX_FMT_YUV420##suf, AV_PIX_FMT_YUV422##suf, AV_PIX_FMT_YUV444##suf
#define PF_ALPHA(suf)   AV_PIX_FMT_YUVA420##suf, AV_PIX_FMT_YUVA422##suf, AV_PIX_FMT_YUVA444##suf
#define PF(suf)         PF_NOALPHA(suf), PF_ALPHA(suf)
static int config_input_ref(AVFilterLink *inlink)
{
    AVFilterContext *ctx  = inlink->dst;
    XPSNRContext *const s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);

    if ((ctx->inputs[0]->w != ctx->inputs[1]->w) ||
        (ctx->inputs[0]->h != ctx->inputs[1]->h)) {
        av_log(ctx, AV_LOG_ERROR, "Width and height of the input videos must match.\n");
        return AVERROR(EINVAL);
    }
    if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
        av_log(ctx, AV_LOG_ERROR, "The input videos must use the same pixel format.\n");
        return AVERROR(EINVAL);
    }

    s->bpp   = (desc->comp[0].depth <= 8 ? 1 : 2);
    s->depth =  desc->comp[0].depth;
    s->max_error_64  = (1 << s->depth) - 1; /* peak sample value */
    s->max_error_64 *= s->max_error_64;

    s->is_rgb    = (ff_fill_rgba_map(s->rgba_map, inlink->format) >= 0);
    s->num_comps = (desc->nb_components > 3 ? 3 : desc->nb_components);

    s->comps[0] = (s->is_rgb ? 'r' : 'y');
    s->comps[1] = (s->is_rgb ? 'g' : 'u');
    s->comps[2] = (s->is_rgb ? 'b' : 'v');

    s->plane_width [1] = s->plane_width [2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->plane_width [0] = s->plane_width [3] = inlink->w;
    s->plane_height[1] = s->plane_height[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->plane_height[0] = s->plane_height[3] = inlink->h;

    return 0;
}
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    XPSNRContext *s = ctx->priv;
    AVFilterLink *mainlink = ctx->inputs[0];
    int ret;

    if ((ret = ff_framesync_init_dualinput(&s->fs, ctx)) < 0)
        return ret;

    outlink->w = mainlink->w;
    outlink->h = mainlink->h;
    outlink->time_base = mainlink->time_base;
    outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;

    if ((ret = ff_framesync_configure(&s->fs)) < 0)
        return ret;

    if (av_cmp_q(ctx->inputs[0]->time_base, ctx->inputs[1]->time_base))
        av_log(ctx, AV_LOG_WARNING, "not matching timebases found between first input: %d/%d and second input %d/%d, results may be incorrect!\n",
               ctx->inputs[0]->time_base.num, ctx->inputs[0]->time_base.den,
               ctx->inputs[1]->time_base.num, ctx->inputs[1]->time_base.den);

    return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
    XPSNRContext *const s = ctx->priv;
    int c;

    if (s->num_frames_64 > 0) { /* print the overall XPSNR averages */
        const double xpsnr_luma = get_avg_xpsnr(s->sum_wdist[0], s->sum_xpsnr[0],
                                                s->plane_width[0], s->plane_height[0],
                                                s->max_error_64, s->num_frames_64);
        double xpsnr_min = xpsnr_luma;

        /* (the AV_LOG_INFO console summary is elided in this listing) */
        if (s->stats_file && s->stats_file != stdout) {
            fprintf(s->stats_file, "\nXPSNR average, %"PRId64" frames", s->num_frames_64);
            fprintf(s->stats_file, " %c: %3.4f", s->comps[0], xpsnr_luma);
        }

        for (c = 1; c < s->num_comps; c++) { /* chroma (or G/B) planes */
            const double xpsnr_chroma = get_avg_xpsnr(s->sum_wdist[c], s->sum_xpsnr[c],
                                                      s->plane_width[c], s->plane_height[c],
                                                      s->max_error_64, s->num_frames_64);
            if (xpsnr_min > xpsnr_chroma)
                xpsnr_min = xpsnr_chroma;

            if (s->stats_file && s->stats_file != stdout)
                fprintf(s->stats_file, " %c: %3.4f", s->comps[c], xpsnr_chroma);
        }

        if (s->num_comps > 1) {
            if (s->stats_file && s->stats_file != stdout)
                fprintf(s->stats_file, " (minimum: %3.4f)\n", xpsnr_min);
        } else {
            if (s->stats_file && s->stats_file != stdout)
                fprintf(s->stats_file, "\n");
        }
    }

    if (s->stats_file && s->stats_file != stdout)
        fclose(s->stats_file);
    ff_framesync_uninit(&s->fs);

    for (c = 0; c < s->num_comps; c++) { /* free the 16-bit conversion buffers */
        av_buffer_unref(&s->buf_org[c]);
        av_buffer_unref(&s->buf_rec[c]);
    }
    for (c = 0; c < s->num_comps; c++) { /* free the delayed-original buffers */
        av_buffer_unref(&s->buf_org_m1[c]);
        av_buffer_unref(&s->buf_org_m2[c]);
    }

    av_freep(&s->sse_luma);
    av_freep(&s->weights);
}
const AVFilter ff_vf_xpsnr = {
    .name          = "xpsnr",
    .description   = NULL_IF_CONFIG_SMALL("Calculate the extended perceptually weighted peak signal-to-noise ratio (XPSNR) between two video streams."),
    .preinit       = xpsnr_framesync_preinit,
    .init          = init,
    .uninit        = uninit,
    .activate      = activate,
    .priv_size     = sizeof(XPSNRContext),
    .priv_class    = &xpsnr_class,
    FILTER_INPUTS(xpsnr_inputs),
    FILTER_OUTPUTS(xpsnr_outputs),
    FILTER_PIXFMTS_ARRAY(xpsnr_formats),
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_METADATA_ONLY,
};
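/* Usage sketch (not part of the original file): the first input is the
 * distorted stream, the second the reference, e.g.
 *
 *   ffmpeg -i distorted.mp4 -i reference.mp4 -lavfi xpsnr=stats_file=xpsnr.log -f null -
 *
 * assuming the option exposed for the stats_file_str field handled in init()
 * above is named stats_file ("-" writes the per-frame statistics to stdout). */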