FFmpeg
vf_scale_npp.c
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 /**
20  * @file
21  * scale video filter
22  */
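As a point of reference before the implementation: the filter is normally instantiated from a filtergraph string such as "scale_npp=1280:720". The sketch below is not part of this file; the helper name, the size and the option values are illustrative, and it assumes an FFmpeg build with CUDA/NPP enabled. It shows the equivalent programmatic route through the public libavfilter API.

    /* Illustrative sketch: create a scale_npp instance inside an existing graph. */
    #include <libavfilter/avfilter.h>
    #include <libavutil/error.h>

    static int create_scale_npp(AVFilterGraph *graph, AVFilterContext **filt)
    {
        const AVFilter *scale_npp = avfilter_get_by_name("scale_npp");
        if (!scale_npp)
            return AVERROR_FILTER_NOT_FOUND; /* filter not compiled in */
        /* "w:h" given positionally, the rest by name, matching the options[] table below */
        return avfilter_graph_create_filter(filt, scale_npp, "scale",
                                            "1280:720:format=nv12:interp_algo=lanczos",
                                            NULL, graph);
    }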
23 
24 #include <nppi.h>
25 #include <stdio.h>
26 #include <string.h>
27 
28 #include "libavutil/hwcontext.h"
29 #include "libavutil/hwcontext_cuda_internal.h"
30 #include "libavutil/cuda_check.h"
31 #include "libavutil/internal.h"
32 #include "libavutil/mem.h"
33 #include "libavutil/opt.h"
34 #include "libavutil/parseutils.h"
35 #include "libavutil/eval.h"
36 #include "libavutil/pixdesc.h"
37 
38 #include "avfilter.h"
39 #include "filters.h"
40 #include "formats.h"
41 #include "scale_eval.h"
42 #include "video.h"
43 
44 #define CHECK_CU(x) FF_CUDA_CHECK_DL(ctx, device_hwctx->internal->cuda_dl, x)
45 
46 static const enum AVPixelFormat supported_formats[] = {
47  AV_PIX_FMT_YUV420P,
48  AV_PIX_FMT_NV12,
49  AV_PIX_FMT_YUV444P,
50  AV_PIX_FMT_YUVA420P,
51 };
52 
53 static const enum AVPixelFormat deinterleaved_formats[][2] = {
54  { AV_PIX_FMT_NV12, AV_PIX_FMT_YUV420P },
55 };
56 
57 enum ScaleStage {
58  STAGE_DEINTERLEAVE,
59  STAGE_RESIZE,
60  STAGE_INTERLEAVE,
61  STAGE_NB,
62 };
63 
64 typedef struct NPPScaleStageContext {
65  int stage_needed;
66  enum AVPixelFormat in_fmt;
67  enum AVPixelFormat out_fmt;
68 
69  struct {
70  int width;
71  int height;
72  } planes_in[4], planes_out[4];
73 
74  AVBufferRef *frames_ctx;
75  AVFrame *frame;
76 } NPPScaleStageContext;
77 
78 static const char *const var_names[] = {
79  "in_w", "iw",
80  "in_h", "ih",
81  "out_w", "ow",
82  "out_h", "oh",
83  "a",
84  "sar",
85  "dar",
86  "n",
87  "t",
88 #if FF_API_FRAME_PKT
89  "pos",
90 #endif
91  "main_w",
92  "main_h",
93  "main_a",
94  "main_sar",
95  "main_dar", "mdar",
96  "main_n",
97  "main_t",
98 #if FF_API_FRAME_PKT
99  "main_pos",
100 #endif
101  NULL
102 };
103 
104 enum var_name {
105  VAR_IN_W, VAR_IW,
106  VAR_IN_H, VAR_IH,
107  VAR_OUT_W, VAR_OW,
108  VAR_OUT_H, VAR_OH,
109  VAR_A,
110  VAR_SAR,
111  VAR_DAR,
112  VAR_N,
113  VAR_T,
114 #if FF_API_FRAME_PKT
115  VAR_POS,
116 #endif
117  VAR_S2R_MAIN_W,
118  VAR_S2R_MAIN_H,
119  VAR_S2R_MAIN_A,
120  VAR_S2R_MAIN_SAR,
121  VAR_S2R_MAIN_DAR, VAR_S2R_MDAR,
122  VAR_S2R_MAIN_N,
123  VAR_S2R_MAIN_T,
124 #if FF_API_FRAME_PKT
125  VAR_S2R_MAIN_POS,
126 #endif
127  VARS_NB
128 };
129 
130 enum EvalMode {
131  EVAL_MODE_INIT,
132  EVAL_MODE_FRAME,
133  EVAL_MODE_NB
134 };
135 
136 typedef struct NPPScaleContext {
137  const AVClass *class;
138 
139  NPPScaleStageContext stages[STAGE_NB];
140  AVFrame *tmp_frame;
141  int passthrough;
142 
143  int shift_width, shift_height;
144 
145  /**
146  * New dimensions. Special values are:
147  * 0 = original width/height
148  * -1 = keep original aspect
149  */
150  int w, h;
151 
152  /**
153  * Output sw format. AV_PIX_FMT_NONE for no conversion.
154  */
155  enum AVPixelFormat format;
156 
157  char *w_expr; ///< width expression string
158  char *h_expr; ///< height expression string
159  char *format_str;
160 
161  int force_original_aspect_ratio;
162  int force_divisible_by;
163  int reset_sar;
164 
165  int interp_algo;
166 
167  char* size_str;
168 
169  AVExpr* w_pexpr;
170  AVExpr* h_pexpr;
171 
172  double var_values[VARS_NB];
173 
174  int eval_mode;
175 } NPPScaleContext;
176 
177 const FFFilter ff_vf_scale2ref_npp;
178 #define IS_SCALE2REF(ctx) ((ctx)->filter == &ff_vf_scale2ref_npp.p)
179 
180 static int config_props(AVFilterLink *outlink);
181 
182 static int check_exprs(AVFilterContext *ctx)
183 {
184  NPPScaleContext* scale = ctx->priv;
185  unsigned vars_w[VARS_NB] = {0}, vars_h[VARS_NB] = {0};
186 
187  if (!scale->w_pexpr && !scale->h_pexpr)
188  return AVERROR(EINVAL);
189 
190  if (scale->w_pexpr)
191  av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB);
192  if (scale->h_pexpr)
193  av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB);
194 
195  if (vars_w[VAR_OUT_W] || vars_w[VAR_OW]) {
196  av_log(ctx, AV_LOG_ERROR, "Width expression cannot be self-referencing: '%s'.\n", scale->w_expr);
197  return AVERROR(EINVAL);
198  }
199 
200  if (vars_h[VAR_OUT_H] || vars_h[VAR_OH]) {
201  av_log(ctx, AV_LOG_ERROR, "Height expression cannot be self-referencing: '%s'.\n", scale->h_expr);
202  return AVERROR(EINVAL);
203  }
204 
205  if ((vars_w[VAR_OUT_H] || vars_w[VAR_OH]) &&
206  (vars_h[VAR_OUT_W] || vars_h[VAR_OW])) {
207  av_log(ctx, AV_LOG_WARNING, "Circular references detected for width '%s' and height '%s' - possibly invalid.\n", scale->w_expr, scale->h_expr);
208  }
209 
210  if (!IS_SCALE2REF(ctx) &&
211  (vars_w[VAR_S2R_MAIN_W] || vars_h[VAR_S2R_MAIN_W] ||
212  vars_w[VAR_S2R_MAIN_H] || vars_h[VAR_S2R_MAIN_H] ||
213  vars_w[VAR_S2R_MAIN_A] || vars_h[VAR_S2R_MAIN_A] ||
214  vars_w[VAR_S2R_MAIN_SAR] || vars_h[VAR_S2R_MAIN_SAR] ||
215  vars_w[VAR_S2R_MAIN_DAR] || vars_h[VAR_S2R_MAIN_DAR] ||
216  vars_w[VAR_S2R_MDAR] || vars_h[VAR_S2R_MDAR] ||
217  vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] ||
218  vars_w[VAR_S2R_MAIN_T] || vars_h[VAR_S2R_MAIN_T]
219 #if FF_API_FRAME_PKT
220  || vars_w[VAR_S2R_MAIN_POS] || vars_h[VAR_S2R_MAIN_POS]
221 #endif
222  )) {
223  av_log(ctx, AV_LOG_ERROR, "Expressions with scale2ref_npp variables are not valid in scale_npp filter.\n");
224  return AVERROR(EINVAL);
225  }
226 
227  if (scale->eval_mode == EVAL_MODE_INIT &&
228  (vars_w[VAR_N] || vars_h[VAR_N] ||
229  vars_w[VAR_T] || vars_h[VAR_T] ||
230 #if FF_API_FRAME_PKT
231  vars_w[VAR_POS] || vars_h[VAR_POS] ||
232 #endif
233  vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] ||
234  vars_w[VAR_S2R_MAIN_T] || vars_h[VAR_S2R_MAIN_T]
235 #if FF_API_FRAME_PKT
236  || vars_w[VAR_S2R_MAIN_POS] || vars_h[VAR_S2R_MAIN_POS]
237 #endif
238  ) ) {
239  av_log(ctx, AV_LOG_ERROR, "Expressions with frame variables 'n', 't', are not valid in init eval_mode.\n");
240  return AVERROR(EINVAL);
241  }
242 
243  return 0;
244 }
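Two illustrative option strings (not from this file) show what the checks above accept and reject:

    /* rejected: the width expression references its own output width (ow) */
    "scale_npp=w=ow/2:h=720"
    /* accepted: width may reference the output height; only a mutual
     * width<->height reference triggers the circular-reference warning */
    "scale_npp=w=oh*a:h=ih/2"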
245 
246 static int nppscale_parse_expr(AVFilterContext* ctx, char* str_expr,
247  AVExpr** pexpr_ptr, const char* var,
248  const char* args)
249 {
250  NPPScaleContext* scale = ctx->priv;
251  int ret, is_inited = 0;
252  char* old_str_expr = NULL;
253  AVExpr* old_pexpr = NULL;
254 
255  if (str_expr) {
256  old_str_expr = av_strdup(str_expr);
257  if (!old_str_expr)
258  return AVERROR(ENOMEM);
259  av_opt_set(scale, var, args, 0);
260  }
261 
262  if (*pexpr_ptr) {
263  old_pexpr = *pexpr_ptr;
264  *pexpr_ptr = NULL;
265  is_inited = 1;
266  }
267 
268  ret = av_expr_parse(pexpr_ptr, args, var_names, NULL, NULL, NULL, NULL, 0,
269  ctx);
270  if (ret < 0) {
271  av_log(ctx, AV_LOG_ERROR, "Cannot parse expression for %s: '%s'\n", var,
272  args);
273  goto revert;
274  }
275 
276  ret = check_exprs(ctx);
277  if (ret < 0)
278  goto revert;
279 
280  if (is_inited && (ret = config_props(ctx->outputs[0])) < 0)
281  goto revert;
282 
283  av_expr_free(old_pexpr);
284  old_pexpr = NULL;
285  av_freep(&old_str_expr);
286 
287  return 0;
288 
289 revert:
290  av_expr_free(*pexpr_ptr);
291  *pexpr_ptr = NULL;
292  if (old_str_expr) {
293  av_opt_set(scale, var, old_str_expr, 0);
294  av_free(old_str_expr);
295  }
296  if (old_pexpr)
297  *pexpr_ptr = old_pexpr;
298 
299  return ret;
300 }
301 
302 static av_cold int nppscale_init(AVFilterContext *ctx)
303 {
304  NPPScaleContext* scale = ctx->priv;
305  int i, ret;
306 
307  if (!strcmp(scale->format_str, "same")) {
308  scale->format = AV_PIX_FMT_NONE;
309  } else {
310  scale->format = av_get_pix_fmt(scale->format_str);
311  if (scale->format == AV_PIX_FMT_NONE) {
312  av_log(ctx, AV_LOG_ERROR, "Unrecognized pixel format: %s\n", scale->format_str);
313  return AVERROR(EINVAL);
314  }
315  }
316 
317  if (scale->size_str && (scale->w_expr || scale->h_expr)) {
318  av_log(ctx, AV_LOG_ERROR,
319  "Size and width/height exprs cannot be set at the same time.\n");
320  return AVERROR(EINVAL);
321  }
322 
323  if (scale->w_expr && !scale->h_expr)
324  FFSWAP(char*, scale->w_expr, scale->size_str);
325 
326  if (scale->size_str) {
327  char buf[32];
328  ret = av_parse_video_size(&scale->w, &scale->h, scale->size_str);
329  if (0 > ret) {
330  av_log(ctx, AV_LOG_ERROR, "Invalid size '%s'\n", scale->size_str);
331  return ret;
332  }
333 
334  snprintf(buf, sizeof(buf) - 1, "%d", scale->w);
335  ret = av_opt_set(scale, "w", buf, 0);
336  if (ret < 0)
337  return ret;
338 
339  snprintf(buf, sizeof(buf) - 1, "%d", scale->h);
340  ret = av_opt_set(scale, "h", buf, 0);
341  if (ret < 0)
342  return ret;
343  }
344 
345  if (!scale->w_expr) {
346  ret = av_opt_set(scale, "w", "iw", 0);
347  if (ret < 0)
348  return ret;
349  }
350 
351  if (!scale->h_expr) {
352  ret = av_opt_set(scale, "h", "ih", 0);
353  if (ret < 0)
354  return ret;
355  }
356 
357  ret = nppscale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr);
358  if (ret < 0)
359  return ret;
360 
361  ret = nppscale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr);
362  if (ret < 0)
363  return ret;
364 
365  for (i = 0; i < FF_ARRAY_ELEMS(scale->stages); i++) {
366  scale->stages[i].frame = av_frame_alloc();
367  if (!scale->stages[i].frame)
368  return AVERROR(ENOMEM);
369  }
370  scale->tmp_frame = av_frame_alloc();
371  if (!scale->tmp_frame)
372  return AVERROR(ENOMEM);
373 
374  return 0;
375 }
376 
377 static int nppscale_eval_dimensions(AVFilterContext *ctx)
378 {
379  NPPScaleContext* scale = ctx->priv;
380  const char scale2ref = IS_SCALE2REF(ctx);
381  const AVFilterLink* inlink = ctx->inputs[scale2ref ? 1 : 0];
382  char* expr;
383  int eval_w, eval_h;
384  int ret;
385  double res;
386 
387  scale->var_values[VAR_IN_W] = scale->var_values[VAR_IW] = inlink->w;
388  scale->var_values[VAR_IN_H] = scale->var_values[VAR_IH] = inlink->h;
389  scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = NAN;
390  scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = NAN;
391  scale->var_values[VAR_A] = (double)inlink->w / inlink->h;
392  scale->var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
393  (double)inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
394  scale->var_values[VAR_DAR] = scale->var_values[VAR_A] * scale->var_values[VAR_SAR];
395 
396  if (scale2ref) {
397  const AVFilterLink* main_link = ctx->inputs[0];
398 
399  scale->var_values[VAR_S2R_MAIN_W] = main_link->w;
400  scale->var_values[VAR_S2R_MAIN_H] = main_link->h;
401  scale->var_values[VAR_S2R_MAIN_A] = (double)main_link->w / main_link->h;
402  scale->var_values[VAR_S2R_MAIN_SAR] = main_link->sample_aspect_ratio.num ?
403  (double)main_link->sample_aspect_ratio.num / main_link->sample_aspect_ratio.den : 1;
404  scale->var_values[VAR_S2R_MAIN_DAR] = scale->var_values[VAR_S2R_MDAR] =
405  scale->var_values[VAR_S2R_MAIN_A] * scale->var_values[VAR_S2R_MAIN_SAR];
406  }
407 
408  res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
409  eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int)res == 0 ? inlink->w : (int)res;
410 
411  res = av_expr_eval(scale->h_pexpr, scale->var_values, NULL);
412  if (isnan(res)) {
413  expr = scale->h_expr;
414  ret = AVERROR(EINVAL);
415  goto fail;
416  }
417  eval_h = scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = (int)res == 0 ? inlink->h : (int)res;
418 
419  res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
420  if (isnan(res)) {
421  expr = scale->w_expr;
422  ret = AVERROR(EINVAL);
423  goto fail;
424  }
425  eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int)res == 0 ? inlink->w : (int)res;
426 
427  scale->w = eval_w;
428  scale->h = eval_h;
429 
430  return 0;
431 
432 fail:
433  av_log(ctx, AV_LOG_ERROR, "Error when evaluating the expression '%s'.\n",
434  expr);
435  return ret;
436 }
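A worked example with illustrative numbers: for a square-pixel 1920x1080 input with w_expr "iw/2" and h_expr "-1", the evaluations above give

    eval_w = 1920 / 2 = 960
    eval_h = -1

The -1 is not resolved here; config_props() later passes both values to ff_scale_adjust_dimensions(), which preserves the aspect ratio and yields 960x540.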
437 
438 static void nppscale_uninit(AVFilterContext *ctx)
439 {
440  NPPScaleContext *s = ctx->priv;
441  int i;
442 
443  for (i = 0; i < FF_ARRAY_ELEMS(s->stages); i++) {
444  av_frame_free(&s->stages[i].frame);
445  av_buffer_unref(&s->stages[i].frames_ctx);
446  }
447  av_frame_free(&s->tmp_frame);
448 
449  av_expr_free(s->w_pexpr);
450  av_expr_free(s->h_pexpr);
451  s->w_pexpr = s->h_pexpr = NULL;
452 }
453 
454 static int init_stage(NPPScaleStageContext *stage, AVBufferRef *device_ctx)
455 {
456  AVBufferRef *out_ref = NULL;
457  AVHWFramesContext *out_ctx;
458  int in_sw, in_sh, out_sw, out_sh;
459  int ret, i;
460 
461  av_pix_fmt_get_chroma_sub_sample(stage->in_fmt, &in_sw, &in_sh);
462  av_pix_fmt_get_chroma_sub_sample(stage->out_fmt, &out_sw, &out_sh);
463  if (!stage->planes_out[0].width) {
464  stage->planes_out[0].width = stage->planes_in[0].width;
465  stage->planes_out[0].height = stage->planes_in[0].height;
466  }
467 
468  for (i = 1; i < FF_ARRAY_ELEMS(stage->planes_in); i++) {
469  stage->planes_in[i].width = stage->planes_in[0].width >> in_sw;
470  stage->planes_in[i].height = stage->planes_in[0].height >> in_sh;
471  stage->planes_out[i].width = stage->planes_out[0].width >> out_sw;
472  stage->planes_out[i].height = stage->planes_out[0].height >> out_sh;
473  }
474 
475  if (AV_PIX_FMT_YUVA420P == stage->in_fmt) {
476  stage->planes_in[3].width = stage->planes_in[0].width;
477  stage->planes_in[3].height = stage->planes_in[0].height;
478  stage->planes_out[3].width = stage->planes_out[0].width;
479  stage->planes_out[3].height = stage->planes_out[0].height;
480  }
481 
482  out_ref = av_hwframe_ctx_alloc(device_ctx);
483  if (!out_ref)
484  return AVERROR(ENOMEM);
485  out_ctx = (AVHWFramesContext*)out_ref->data;
486 
487  out_ctx->format = AV_PIX_FMT_CUDA;
488  out_ctx->sw_format = stage->out_fmt;
489  out_ctx->width = FFALIGN(stage->planes_out[0].width, 32);
490  out_ctx->height = FFALIGN(stage->planes_out[0].height, 32);
491 
492  ret = av_hwframe_ctx_init(out_ref);
493  if (ret < 0)
494  goto fail;
495 
496  av_frame_unref(stage->frame);
497  ret = av_hwframe_get_buffer(out_ref, stage->frame, 0);
498  if (ret < 0)
499  goto fail;
500 
501  stage->frame->width = stage->planes_out[0].width;
502  stage->frame->height = stage->planes_out[0].height;
503 
504  av_buffer_unref(&stage->frames_ctx);
505  stage->frames_ctx = out_ref;
506 
507  return 0;
508 fail:
509  av_buffer_unref(&out_ref);
510  return ret;
511 }
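For illustration, a stage that outputs 1280x720 in a 4:2:0 format ends up with

    planes_out[0]      = 1280 x 720
    planes_out[1], [2] = (1280 >> 1) x (720 >> 1) = 640 x 360
    pool dimensions    = FFALIGN(1280, 32) x FFALIGN(720, 32) = 1280 x 736

i.e. the chroma planes follow the chroma shifts computed above, and the frame pool backing the stage is padded to 32-pixel alignment.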
512 
513 static int format_is_supported(enum AVPixelFormat fmt)
514 {
515  int i;
516 
517  for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++)
518  if (supported_formats[i] == fmt)
519  return 1;
520  return 0;
521 }
522 
522 
523 static enum AVPixelFormat get_deinterleaved_format(enum AVPixelFormat fmt)
524 {
525  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
526  int i, planes;
527 
528  planes = av_pix_fmt_count_planes(fmt);
529  if (planes == desc->nb_components)
530  return fmt;
531  for (i = 0; i < FF_ARRAY_ELEMS(deinterleaved_formats); i++)
532  if (deinterleaved_formats[i][0] == fmt)
533  return deinterleaved_formats[i][1];
534  return AV_PIX_FMT_NONE;
535 }
536 
537 static int init_processing_chain(AVFilterContext *ctx, int in_width, int in_height,
538  int out_width, int out_height)
539 {
540  NPPScaleContext *s = ctx->priv;
541  FilterLink *inl = ff_filter_link(ctx->inputs[0]);
542  FilterLink *outl = ff_filter_link(ctx->outputs[0]);
543 
544  AVHWFramesContext *in_frames_ctx;
545 
546  enum AVPixelFormat in_format;
547  enum AVPixelFormat out_format;
548  enum AVPixelFormat in_deinterleaved_format;
549  enum AVPixelFormat out_deinterleaved_format;
550 
551  int i, ret, last_stage = -1;
552 
553  /* check that we have a hw context */
554  if (!inl->hw_frames_ctx) {
555  av_log(ctx, AV_LOG_ERROR, "No hw context provided on input\n");
556  return AVERROR(EINVAL);
557  }
558  in_frames_ctx = (AVHWFramesContext*)inl->hw_frames_ctx->data;
559  in_format = in_frames_ctx->sw_format;
560  out_format = (s->format == AV_PIX_FMT_NONE) ? in_format : s->format;
561 
562  if (!format_is_supported(in_format)) {
563  av_log(ctx, AV_LOG_ERROR, "Unsupported input format: %s\n",
564  av_get_pix_fmt_name(in_format));
565  return AVERROR(ENOSYS);
566  }
567  if (!format_is_supported(out_format)) {
568  av_log(ctx, AV_LOG_ERROR, "Unsupported output format: %s\n",
569  av_get_pix_fmt_name(out_format));
570  return AVERROR(ENOSYS);
571  }
572 
573  in_deinterleaved_format = get_deinterleaved_format(in_format);
574  out_deinterleaved_format = get_deinterleaved_format(out_format);
575  if (in_deinterleaved_format == AV_PIX_FMT_NONE ||
576  out_deinterleaved_format == AV_PIX_FMT_NONE)
577  return AVERROR_BUG;
578 
579  /* figure out which stages need to be done */
580  if (in_width != out_width || in_height != out_height ||
581  in_deinterleaved_format != out_deinterleaved_format) {
582  s->stages[STAGE_RESIZE].stage_needed = 1;
583 
584  if (s->interp_algo == NPPI_INTER_SUPER &&
585  (out_width > in_width && out_height > in_height)) {
586  s->interp_algo = NPPI_INTER_LANCZOS;
587  av_log(ctx, AV_LOG_WARNING, "super-sampling not supported for output dimensions, using lanczos instead.\n");
588  }
589  if (s->interp_algo == NPPI_INTER_SUPER &&
590  !(out_width < in_width && out_height < in_height)) {
591  s->interp_algo = NPPI_INTER_CUBIC;
592  av_log(ctx, AV_LOG_WARNING, "super-sampling not supported for output dimensions, using cubic instead.\n");
593  }
594  }
595 
596  if (!s->stages[STAGE_RESIZE].stage_needed && in_format == out_format)
597  s->passthrough = 1;
598 
599  if (!s->passthrough) {
600  if (in_format != in_deinterleaved_format)
601  s->stages[STAGE_DEINTERLEAVE].stage_needed = 1;
602  if (out_format != out_deinterleaved_format)
603  s->stages[STAGE_INTERLEAVE].stage_needed = 1;
604  }
605 
606  s->stages[STAGE_DEINTERLEAVE].in_fmt = in_format;
607  s->stages[STAGE_DEINTERLEAVE].out_fmt = in_deinterleaved_format;
608  s->stages[STAGE_DEINTERLEAVE].planes_in[0].width = in_width;
609  s->stages[STAGE_DEINTERLEAVE].planes_in[0].height = in_height;
610 
611  s->stages[STAGE_RESIZE].in_fmt = in_deinterleaved_format;
612  s->stages[STAGE_RESIZE].out_fmt = out_deinterleaved_format;
613  s->stages[STAGE_RESIZE].planes_in[0].width = in_width;
614  s->stages[STAGE_RESIZE].planes_in[0].height = in_height;
615  s->stages[STAGE_RESIZE].planes_out[0].width = out_width;
616  s->stages[STAGE_RESIZE].planes_out[0].height = out_height;
617 
618  s->stages[STAGE_INTERLEAVE].in_fmt = out_deinterleaved_format;
619  s->stages[STAGE_INTERLEAVE].out_fmt = out_format;
620  s->stages[STAGE_INTERLEAVE].planes_in[0].width = out_width;
621  s->stages[STAGE_INTERLEAVE].planes_in[0].height = out_height;
622 
623  /* init the hardware contexts */
624  for (i = 0; i < FF_ARRAY_ELEMS(s->stages); i++) {
625  if (!s->stages[i].stage_needed)
626  continue;
627 
628  ret = init_stage(&s->stages[i], in_frames_ctx->device_ref);
629  if (ret < 0)
630  return ret;
631 
632  last_stage = i;
633  }
634 
635  if (last_stage >= 0)
636  outl->hw_frames_ctx = av_buffer_ref(s->stages[last_stage].frames_ctx);
637  else
638  outl->hw_frames_ctx = av_buffer_ref(inl->hw_frames_ctx);
639 
640  if (!outl->hw_frames_ctx)
641  return AVERROR(ENOMEM);
642 
643  return 0;
644 }
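As an illustration of the stage selection above: scaling NV12 to NV12 at a new size enables all three stages, because NV12 differs from its deinterleaved (planar) form on both ends:

    NV12 1920x1080 -> STAGE_DEINTERLEAVE -> YUV420P 1920x1080
                   -> STAGE_RESIZE       -> YUV420P 1280x720
                   -> STAGE_INTERLEAVE   -> NV12    1280x720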
645 
646 static int config_props(AVFilterLink *outlink)
647 {
648  AVFilterContext *ctx = outlink->src;
649  AVFilterLink *inlink0 = outlink->src->inputs[0];
650  AVFilterLink *inlink = IS_SCALE2REF(ctx) ?
651  outlink->src->inputs[1] :
652  outlink->src->inputs[0];
653  NPPScaleContext *s = ctx->priv;
654  double w_adj = 1.0;
655  int ret;
656 
657  if ((ret = nppscale_eval_dimensions(ctx)) < 0)
658  goto fail;
659 
660  if (s->reset_sar)
661  w_adj = IS_SCALE2REF(ctx) ? s->var_values[VAR_S2R_MAIN_SAR] :
662  s->var_values[VAR_SAR];
663 
664  ff_scale_adjust_dimensions(inlink, &s->w, &s->h,
665  s->force_original_aspect_ratio,
666  s->force_divisible_by, w_adj);
667 
668  if (s->w > INT_MAX || s->h > INT_MAX ||
669  (s->h * inlink->w) > INT_MAX ||
670  (s->w * inlink->h) > INT_MAX)
671  av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");
672 
673  outlink->w = s->w;
674  outlink->h = s->h;
675 
676  ret = init_processing_chain(ctx, inlink0->w, inlink0->h, outlink->w, outlink->h);
677  if (ret < 0)
678  return ret;
679 
680  av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d\n",
681  inlink->w, inlink->h, outlink->w, outlink->h);
682 
683  if (s->reset_sar)
684  outlink->sample_aspect_ratio = (AVRational){1, 1};
685  else if (inlink->sample_aspect_ratio.num)
686  outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h*inlink->w,
687  outlink->w*inlink->h},
688  inlink->sample_aspect_ratio);
689  else
690  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
691 
692  return 0;
693 
694 fail:
695  return ret;
696 }
697 
698 static int config_props_ref(AVFilterLink *outlink)
699 {
700  FilterLink *outl = ff_filter_link(outlink);
701  AVFilterLink *inlink = outlink->src->inputs[1];
702  FilterLink *inl = ff_filter_link(inlink);
703  FilterLink *ol = ff_filter_link(outlink);
704 
705  outlink->w = inlink->w;
706  outlink->h = inlink->h;
707  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
708  outlink->time_base = inlink->time_base;
709  ol->frame_rate = inl->frame_rate;
710 
712 
713  return 0;
714 }
715 
716 static int nppscale_deinterleave(AVFilterContext *ctx, NPPScaleStageContext *stage,
717  AVFrame *out, AVFrame *in)
718 {
719  AVHWFramesContext *in_frames_ctx = (AVHWFramesContext*)in->hw_frames_ctx->data;
720  NppStatus err;
721 
722  switch (in_frames_ctx->sw_format) {
723  case AV_PIX_FMT_NV12:
724  err = nppiYCbCr420_8u_P2P3R(in->data[0], in->linesize[0],
725  in->data[1], in->linesize[1],
726  out->data, out->linesize,
727  (NppiSize){ in->width, in->height });
728  break;
729  default:
730  return AVERROR_BUG;
731  }
732  if (err != NPP_SUCCESS) {
733  av_log(ctx, AV_LOG_ERROR, "NPP deinterleave error: %d\n", err);
734  return AVERROR_UNKNOWN;
735  }
736 
737  return 0;
738 }
739 
740 static int nppscale_resize(AVFilterContext *ctx, NPPScaleStageContext *stage,
741  AVFrame *out, AVFrame *in)
742 {
743  NPPScaleContext *s = ctx->priv;
744  NppStatus err;
745  int i;
746 
747  for (i = 0; i < FF_ARRAY_ELEMS(stage->planes_in) && i < FF_ARRAY_ELEMS(in->data) && in->data[i]; i++) {
748  int iw = stage->planes_in[i].width;
749  int ih = stage->planes_in[i].height;
750  int ow = stage->planes_out[i].width;
751  int oh = stage->planes_out[i].height;
752 
753  err = nppiResizeSqrPixel_8u_C1R(in->data[i], (NppiSize){ iw, ih },
754  in->linesize[i], (NppiRect){ 0, 0, iw, ih },
755  out->data[i], out->linesize[i],
756  (NppiRect){ 0, 0, ow, oh },
757  (double)ow / iw, (double)oh / ih,
758  0.0, 0.0, s->interp_algo);
759  if (err != NPP_SUCCESS) {
760  av_log(ctx, AV_LOG_ERROR, "NPP resize error: %d\n", err);
761  return AVERROR_UNKNOWN;
762  }
763  }
764 
765  return 0;
766 }
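The scale factors handed to nppiResizeSqrPixel_8u_C1R are simply the per-plane output/input ratios; with illustrative sizes, resizing a 1920x1080 luma plane to 1280x720 uses

    xFactor = 1280.0 / 1920 = 0.666...
    yFactor =  720.0 / 1080 = 0.666...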
767 
768 static int nppscale_interleave(AVFilterContext *ctx, NPPScaleStageContext *stage,
769  AVFrame *out, AVFrame *in)
770 {
771  AVHWFramesContext *out_frames_ctx = (AVHWFramesContext*)out->hw_frames_ctx->data;
772  NppStatus err;
773 
774  switch (out_frames_ctx->sw_format) {
775  case AV_PIX_FMT_NV12:
776  err = nppiYCbCr420_8u_P3P2R((const uint8_t**)in->data,
777  in->linesize,
778  out->data[0], out->linesize[0],
779  out->data[1], out->linesize[1],
780  (NppiSize){ in->width, in->height });
781  break;
782  default:
783  return AVERROR_BUG;
784  }
785  if (err != NPP_SUCCESS) {
786  av_log(ctx, AV_LOG_ERROR, "NPP interleave error: %d\n", err);
787  return AVERROR_UNKNOWN;
788  }
789 
790  return 0;
791 }
792 
793 static int (*const nppscale_process[])(AVFilterContext *ctx, NPPScaleStageContext *stage,
794  AVFrame *out, AVFrame *in) = {
795  [STAGE_DEINTERLEAVE] = nppscale_deinterleave,
796  [STAGE_RESIZE] = nppscale_resize,
797  [STAGE_INTERLEAVE] = nppscale_interleave,
798 };
799 
800 static int nppscale_scale(AVFilterLink *link, AVFrame *out, AVFrame *in)
801 {
802  FilterLink *inl = ff_filter_link(link);
803  AVFilterContext *ctx = link->dst;
804  NPPScaleContext *s = ctx->priv;
805  AVFilterLink *outlink = ctx->outputs[0];
806  AVFrame *src = in;
807  char buf[32];
808  int i, ret, last_stage = -1;
809  int frame_changed;
810 
811  frame_changed = in->width != link->w ||
812  in->height != link->h ||
813  in->format != link->format ||
814  in->sample_aspect_ratio.den != link->sample_aspect_ratio.den ||
815  in->sample_aspect_ratio.num != link->sample_aspect_ratio.num;
816 
817  if (s->eval_mode == EVAL_MODE_FRAME || frame_changed) {
818  unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 };
819 
820  av_expr_count_vars(s->w_pexpr, vars_w, VARS_NB);
821  av_expr_count_vars(s->h_pexpr, vars_h, VARS_NB);
822 
823  if (s->eval_mode == EVAL_MODE_FRAME && !frame_changed && !IS_SCALE2REF(ctx) &&
824  !(vars_w[VAR_N] || vars_w[VAR_T]
825 #if FF_API_FRAME_PKT
826  || vars_w[VAR_POS]
827 #endif
828  ) &&
829  !(vars_h[VAR_N] || vars_h[VAR_T]
830 #if FF_API_FRAME_PKT
831  || vars_h[VAR_POS]
832 #endif
833  ) && s->w && s->h)
834  goto scale;
835 
836  if (s->eval_mode == EVAL_MODE_INIT) {
837  snprintf(buf, sizeof(buf)-1, "%d", outlink->w);
838  av_opt_set(s, "w", buf, 0);
839  snprintf(buf, sizeof(buf)-1, "%d", outlink->h);
840  av_opt_set(s, "h", buf, 0);
841 
842  ret = nppscale_parse_expr(ctx, NULL, &s->w_pexpr, "width", s->w_expr);
843  if (ret < 0)
844  return ret;
845 
846  ret = nppscale_parse_expr(ctx, NULL, &s->h_pexpr, "height", s->h_expr);
847  if (ret < 0)
848  return ret;
849  }
850 
851  if (IS_SCALE2REF(ctx)) {
852  s->var_values[VAR_S2R_MAIN_N] = inl->frame_count_out;
853  s->var_values[VAR_S2R_MAIN_T] = TS2T(in->pts, link->time_base);
854 #if FF_API_FRAME_PKT
855 FF_DISABLE_DEPRECATION_WARNINGS
856  s->var_values[VAR_S2R_MAIN_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
857 FF_ENABLE_DEPRECATION_WARNINGS
858 #endif
859  } else {
860  s->var_values[VAR_N] = inl->frame_count_out;
861  s->var_values[VAR_T] = TS2T(in->pts, link->time_base);
862 #if FF_API_FRAME_PKT
863 FF_DISABLE_DEPRECATION_WARNINGS
864  s->var_values[VAR_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
865 FF_ENABLE_DEPRECATION_WARNINGS
866 #endif
867  }
868 
869  link->format = in->format;
870  link->w = in->width;
871  link->h = in->height;
872 
873  link->sample_aspect_ratio.den = in->sample_aspect_ratio.den;
874  link->sample_aspect_ratio.num = in->sample_aspect_ratio.num;
875 
876  if ((ret = config_props(outlink)) < 0)
877  return ret;
878  }
879 
880 scale:
881  for (i = 0; i < FF_ARRAY_ELEMS(s->stages); i++) {
882  if (!s->stages[i].stage_needed)
883  continue;
884 
885  ret = nppscale_process[i](ctx, &s->stages[i], s->stages[i].frame, src);
886  if (ret < 0)
887  return ret;
888 
889  src = s->stages[i].frame;
890  last_stage = i;
891  }
892  if (last_stage < 0)
893  return AVERROR_BUG;
894 
895  ret = av_hwframe_get_buffer(src->hw_frames_ctx, s->tmp_frame, 0);
896  if (ret < 0)
897  return ret;
898 
899  s->tmp_frame->width = src->width;
900  s->tmp_frame->height = src->height;
901 
902  av_frame_move_ref(out, src);
903  av_frame_move_ref(src, s->tmp_frame);
904 
905  ret = av_frame_copy_props(out, in);
906  if (ret < 0)
907  return ret;
908 
909  if (out->width != in->width || out->height != in->height) {
910  av_frame_side_data_remove_by_props(&out->side_data, &out->nb_side_data,
911  AV_SIDE_DATA_PROP_SIZE_DEPENDENT);
912  }
913 
914  return 0;
915 }
916 
917 static int nppscale_filter_frame(AVFilterLink *link, AVFrame *in)
918 {
919  AVFilterContext *ctx = link->dst;
920  NPPScaleContext *s = ctx->priv;
921  AVFilterLink *outlink = ctx->outputs[0];
922  FilterLink *l = ff_filter_link(outlink);
923  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)l->hw_frames_ctx->data;
924  AVCUDADeviceContext *device_hwctx = frames_ctx->device_ctx->hwctx;
925 
926  AVFrame *out = NULL;
927  CUcontext dummy;
928  int ret = 0;
929 
930  if (s->passthrough)
931  return ff_filter_frame(outlink, in);
932 
933  out = av_frame_alloc();
934  if (!out) {
935  ret = AVERROR(ENOMEM);
936  goto fail;
937  }
938 
939  ret = CHECK_CU(device_hwctx->internal->cuda_dl->cuCtxPushCurrent(device_hwctx->cuda_ctx));
940  if (ret < 0)
941  goto fail;
942 
943  ret = nppscale_scale(link, out, in);
944 
945  CHECK_CU(device_hwctx->internal->cuda_dl->cuCtxPopCurrent(&dummy));
946  if (ret < 0)
947  goto fail;
948 
949  av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
950  (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
951  (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
952  INT_MAX);
953 
954  av_frame_free(&in);
955  return ff_filter_frame(outlink, out);
956 fail:
957  av_frame_free(&in);
958  av_frame_free(&out);
959  return ret;
960 }
961 
962 static int nppscale_filter_frame_ref(AVFilterLink *link, AVFrame *in)
963 {
964  FilterLink *inl = ff_filter_link(link);
965  NPPScaleContext *scale = link->dst->priv;
966  AVFilterLink *outlink = link->dst->outputs[1];
967  int frame_changed;
968 
969  frame_changed = in->width != link->w ||
970  in->height != link->h ||
971  in->format != link->format ||
972  in->sample_aspect_ratio.den != link->sample_aspect_ratio.den ||
973  in->sample_aspect_ratio.num != link->sample_aspect_ratio.num;
974 
975  if (frame_changed) {
976  link->format = in->format;
977  link->w = in->width;
978  link->h = in->height;
979  link->sample_aspect_ratio.den = in->sample_aspect_ratio.den;
980  link->sample_aspect_ratio.num = in->sample_aspect_ratio.num;
981 
982  config_props_ref(outlink);
983  }
984 
985  if (scale->eval_mode == EVAL_MODE_FRAME) {
986  scale->var_values[VAR_N] = inl->frame_count_out;
987  scale->var_values[VAR_T] = TS2T(in->pts, link->time_base);
988 #if FF_API_FRAME_PKT
989 FF_DISABLE_DEPRECATION_WARNINGS
990  scale->var_values[VAR_POS] = in->pkt_pos == -1 ? NAN : in->pkt_pos;
991 FF_ENABLE_DEPRECATION_WARNINGS
992 #endif
993  }
994 
995  return ff_filter_frame(outlink, in);
996 }
997 
998 static int request_frame(AVFilterLink *outlink)
999 {
1000  return ff_request_frame(outlink->src->inputs[0]);
1001 }
1002 
1003 static int request_frame_ref(AVFilterLink *outlink)
1004 {
1005  return ff_request_frame(outlink->src->inputs[1]);
1006 }
1007 
1008 #define OFFSET(x) offsetof(NPPScaleContext, x)
1009 #define FLAGS (AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM)
1010 static const AVOption options[] = {
1011  { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
1012  { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
1013  { "format", "Output pixel format", OFFSET(format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },
1014  { "s", "Output video size", OFFSET(size_str), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = FLAGS },
1015 
1016  { "interp_algo", "Interpolation algorithm used for resizing", OFFSET(interp_algo), AV_OPT_TYPE_INT, { .i64 = NPPI_INTER_CUBIC }, 0, INT_MAX, FLAGS, .unit = "interp_algo" },
1017  { "nn", "nearest neighbour", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_NN }, 0, 0, FLAGS, .unit = "interp_algo" },
1018  { "linear", "linear", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_LINEAR }, 0, 0, FLAGS, .unit = "interp_algo" },
1019  { "cubic", "cubic", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_CUBIC }, 0, 0, FLAGS, .unit = "interp_algo" },
1020  { "cubic2p_bspline", "2-parameter cubic (B=1, C=0)", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_CUBIC2P_BSPLINE }, 0, 0, FLAGS, .unit = "interp_algo" },
1021  { "cubic2p_catmullrom", "2-parameter cubic (B=0, C=1/2)", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_CUBIC2P_CATMULLROM }, 0, 0, FLAGS, .unit = "interp_algo" },
1022  { "cubic2p_b05c03", "2-parameter cubic (B=1/2, C=3/10)", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_CUBIC2P_B05C03 }, 0, 0, FLAGS, .unit = "interp_algo" },
1023  { "super", "supersampling", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_SUPER }, 0, 0, FLAGS, .unit = "interp_algo" },
1024  { "lanczos", "Lanczos", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_LANCZOS }, 0, 0, FLAGS, .unit = "interp_algo" },
1025  { "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 2, FLAGS, .unit = "force_oar" },
1026  { "disable", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, .unit = "force_oar" },
1027  { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, .unit = "force_oar" },
1028  { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, .unit = "force_oar" },
1029  { "force_divisible_by", "enforce that the output resolution is divisible by a defined integer when force_original_aspect_ratio is used", OFFSET(force_divisible_by), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 256, FLAGS },
1030  { "reset_sar", "reset SAR to 1 and scale to square pixels if scaling proportionally", OFFSET(reset_sar), AV_OPT_TYPE_BOOL, { .i64 = 0}, 0, 1, FLAGS },
1031  { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, { .i64 = EVAL_MODE_INIT }, 0, EVAL_MODE_NB-1, FLAGS, .unit = "eval" },
1032  { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, { .i64 = EVAL_MODE_INIT }, 0, 0, FLAGS, .unit = "eval" },
1033  { "frame", "eval expressions during initialization and per-frame", 0, AV_OPT_TYPE_CONST, { .i64 = EVAL_MODE_FRAME }, 0, 0, FLAGS, .unit = "eval" },
1034  { NULL },
1035 };
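Tying the option table together, a filtergraph string exercising it could look like the following (illustrative only; the constant name is made up, and the graph assumes frames are first uploaded to CUDA memory, e.g. with hwupload_cuda):

    /* Illustrative usage of the options declared above. */
    static const char *const example_filtergraph =
        "hwupload_cuda,scale_npp=w=iw/2:h=-1:interp_algo=super:format=nv12,hwdownload";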
1036 
1037 static const AVClass nppscale_class = {
1038  .class_name = "nppscale",
1039  .item_name = av_default_item_name,
1040  .option = options,
1041  .version = LIBAVUTIL_VERSION_INT,
1042  .category = AV_CLASS_CATEGORY_FILTER,
1043 };
1044 
1045 static const AVFilterPad nppscale_inputs[] = {
1046  {
1047  .name = "default",
1048  .type = AVMEDIA_TYPE_VIDEO,
1049  .filter_frame = nppscale_filter_frame,
1050  }
1051 };
1052 
1053 static const AVFilterPad nppscale_outputs[] = {
1054  {
1055  .name = "default",
1056  .type = AVMEDIA_TYPE_VIDEO,
1057  .config_props = config_props,
1058  }
1059 };
1060 
1061 const FFFilter ff_vf_scale_npp = {
1062  .p.name = "scale_npp",
1063  .p.description = NULL_IF_CONFIG_SMALL("NVIDIA Performance Primitives video "
1064  "scaling and format conversion"),
1065  .p.priv_class = &nppscale_class,
1066 
1067  .init = nppscale_init,
1068  .uninit = nppscale_uninit,
1069 
1070  .priv_size = sizeof(NPPScaleContext),
1071 
1072  FILTER_INPUTS(nppscale_inputs),
1073  FILTER_OUTPUTS(nppscale_outputs),
1074 
1075  FILTER_SINGLE_PIXFMT(AV_PIX_FMT_CUDA),
1076 
1077  .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
1078 };
1079 
1080 static const AVFilterPad nppscale2ref_inputs[] = {
1081  {
1082  .name = "default",
1083  .type = AVMEDIA_TYPE_VIDEO,
1084  .filter_frame = nppscale_filter_frame,
1085  },
1086  {
1087  .name = "ref",
1088  .type = AVMEDIA_TYPE_VIDEO,
1089  .filter_frame = nppscale_filter_frame_ref,
1090  }
1091 };
1092 
1093 static const AVFilterPad nppscale2ref_outputs[] = {
1094  {
1095  .name = "default",
1096  .type = AVMEDIA_TYPE_VIDEO,
1097  .config_props = config_props,
1098  .request_frame= request_frame,
1099  },
1100  {
1101  .name = "ref",
1102  .type = AVMEDIA_TYPE_VIDEO,
1103  .config_props = config_props_ref,
1104  .request_frame= request_frame_ref,
1105  }
1106 };
1107 
1108 const FFFilter ff_vf_scale2ref_npp = {
1109  .p.name = "scale2ref_npp",
1110  .p.description = NULL_IF_CONFIG_SMALL("NVIDIA Performance Primitives video "
1111  "scaling and format conversion to the "
1112  "given reference."),
1113  .p.priv_class = &nppscale_class,
1114 
1115  .init = nppscale_init,
1116  .uninit = nppscale_uninit,
1117 
1118  .priv_size = sizeof(NPPScaleContext),
1119 
1120  FILTER_INPUTS(nppscale2ref_inputs),
1121  FILTER_OUTPUTS(nppscale2ref_outputs),
1122 
1123  FILTER_SINGLE_PIXFMT(AV_PIX_FMT_CUDA),
1124 
1125  .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
1126 };
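scale2ref_npp takes two inputs, scales the first using expressions that may reference the second ("ref") input, and passes the reference through unchanged on its second output. An illustrative graph fragment (the constant name and pad labels are made up; the expressions follow the documented scale2ref pattern, and both inputs must already be CUDA frames):

    /* Shrink a logo to one tenth of the reference video's height,
     * keeping the logo's own display aspect ratio. */
    static const char *const example_scale2ref_graph =
        "[logo][video]scale2ref_npp=w=oh*mdar:h=ih/10[logo_small][video_out]";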