vf_scale_npp.c
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 /**
20  * @file
21  * scale video filter
22  */
23 
24 #include <nppi.h>
25 #include <stdio.h>
26 #include <string.h>
27 
28 #include "libavutil/hwcontext.h"
29 #include "libavutil/hwcontext_cuda_internal.h"
30 #include "libavutil/cuda_check.h"
31 #include "libavutil/internal.h"
32 #include "libavutil/mem.h"
33 #include "libavutil/opt.h"
34 #include "libavutil/parseutils.h"
35 #include "libavutil/eval.h"
36 #include "libavutil/pixdesc.h"
37 
38 #include "avfilter.h"
39 #include "filters.h"
40 #include "formats.h"
41 #include "scale_eval.h"
42 #include "video.h"
43 
44 #define CHECK_CU(x) FF_CUDA_CHECK_DL(ctx, device_hwctx->internal->cuda_dl, x)
45 
46 static const enum AVPixelFormat supported_formats[] = {
47  AV_PIX_FMT_YUV420P,
48  AV_PIX_FMT_NV12,
49  AV_PIX_FMT_YUV444P,
51 };
52 
53 static const enum AVPixelFormat deinterleaved_formats[][2] = {
54  { AV_PIX_FMT_NV12, AV_PIX_FMT_YUV420P },
55 };
56 
57 enum ScaleStage {
58  STAGE_DEINTERLEAVE,
59  STAGE_RESIZE,
60  STAGE_INTERLEAVE,
61  STAGE_NB,
62 };
63 
64 typedef struct NPPScaleStageContext {
65  int stage_needed;
66  enum AVPixelFormat in_fmt;
67  enum AVPixelFormat out_fmt;
68 
69  struct {
70  int width;
71  int height;
72  } planes_in[4], planes_out[4];
73 
74  AVBufferRef *frames_ctx;
75  AVFrame *frame;
76 } NPPScaleStageContext;
77 
78 static const char *const var_names[] = {
79  "in_w", "iw",
80  "in_h", "ih",
81  "out_w", "ow",
82  "out_h", "oh",
83  "a",
84  "sar",
85  "dar",
86  "n",
87  "t",
88  "main_w",
89  "main_h",
90  "main_a",
91  "main_sar",
92  "main_dar", "mdar",
93  "main_n",
94  "main_t",
95  NULL
96 };
97 
98 enum var_name {
99  VAR_IN_W, VAR_IW,
100  VAR_IN_H, VAR_IH,
101  VAR_OUT_W, VAR_OW,
102  VAR_OUT_H, VAR_OH,
103  VAR_A,
104  VAR_SAR,
105  VAR_DAR,
106  VAR_N,
107  VAR_T,
108  VAR_S2R_MAIN_W,
109  VAR_S2R_MAIN_H,
110  VAR_S2R_MAIN_A,
111  VAR_S2R_MAIN_SAR,
112  VAR_S2R_MAIN_DAR, VAR_S2R_MDAR,
113  VAR_S2R_MAIN_N,
114  VAR_S2R_MAIN_T,
115  VARS_NB
116 };
117 
118 enum EvalMode {
119  EVAL_MODE_INIT,
120  EVAL_MODE_FRAME,
121  EVAL_MODE_NB
122 };
123 
124 typedef struct NPPScaleContext {
125  const AVClass *class;
126 
127  NPPScaleStageContext stages[STAGE_NB];
128  AVFrame *tmp_frame;
129  int passthrough;
130 
131  int shift_width, shift_height;
132 
133  /**
134  * New dimensions. Special values are:
135  * 0 = original width/height
136  * -1 = keep original aspect
137  */
138  int w, h;
139 
140  /**
141  * Output sw format. AV_PIX_FMT_NONE for no conversion.
142  */
143  enum AVPixelFormat format;
144 
145  char *w_expr; ///< width expression string
146  char *h_expr; ///< height expression string
147  char *format_str;
148 
149  int force_original_aspect_ratio;
150  int force_divisible_by;
151  int reset_sar;
152 
153  int interp_algo;
154 
155  char* size_str;
156 
157  AVExpr* w_pexpr;
158  AVExpr* h_pexpr;
159 
160  double var_values[VARS_NB];
161 
162  int eval_mode;
163 } NPPScaleContext;
164 
165 const FFFilter ff_vf_scale2ref_npp;
166 #define IS_SCALE2REF(ctx) ((ctx)->filter == &ff_vf_scale2ref_npp.p)
167 
168 static int config_props(AVFilterLink *outlink);
169 
170 static int check_exprs(AVFilterContext* ctx)
171 {
172  NPPScaleContext* scale = ctx->priv;
173  unsigned vars_w[VARS_NB] = {0}, vars_h[VARS_NB] = {0};
174 
175  if (!scale->w_pexpr && !scale->h_pexpr)
176  return AVERROR(EINVAL);
177 
178  if (scale->w_pexpr)
179  av_expr_count_vars(scale->w_pexpr, vars_w, VARS_NB);
180  if (scale->h_pexpr)
181  av_expr_count_vars(scale->h_pexpr, vars_h, VARS_NB);
182 
183  if (vars_w[VAR_OUT_W] || vars_w[VAR_OW]) {
184  av_log(ctx, AV_LOG_ERROR, "Width expression cannot be self-referencing: '%s'.\n", scale->w_expr);
185  return AVERROR(EINVAL);
186  }
187 
188  if (vars_h[VAR_OUT_H] || vars_h[VAR_OH]) {
189  av_log(ctx, AV_LOG_ERROR, "Height expression cannot be self-referencing: '%s'.\n", scale->h_expr);
190  return AVERROR(EINVAL);
191  }
192 
193  if ((vars_w[VAR_OUT_H] || vars_w[VAR_OH]) &&
194  (vars_h[VAR_OUT_W] || vars_h[VAR_OW])) {
195  av_log(ctx, AV_LOG_WARNING, "Circular references detected for width '%s' and height '%s' - possibly invalid.\n", scale->w_expr, scale->h_expr);
196  }
197 
198  if (!IS_SCALE2REF(ctx) &&
199  (vars_w[VAR_S2R_MAIN_W] || vars_h[VAR_S2R_MAIN_W] ||
200  vars_w[VAR_S2R_MAIN_H] || vars_h[VAR_S2R_MAIN_H] ||
201  vars_w[VAR_S2R_MAIN_A] || vars_h[VAR_S2R_MAIN_A] ||
202  vars_w[VAR_S2R_MAIN_SAR] || vars_h[VAR_S2R_MAIN_SAR] ||
203  vars_w[VAR_S2R_MAIN_DAR] || vars_h[VAR_S2R_MAIN_DAR] ||
204  vars_w[VAR_S2R_MDAR] || vars_h[VAR_S2R_MDAR] ||
205  vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] ||
206  vars_w[VAR_S2R_MAIN_T] || vars_h[VAR_S2R_MAIN_T])) {
207  av_log(ctx, AV_LOG_ERROR, "Expressions with scale2ref_npp variables are not valid in scale_npp filter.\n");
208  return AVERROR(EINVAL);
209  }
210 
211  if (scale->eval_mode == EVAL_MODE_INIT &&
212  (vars_w[VAR_N] || vars_h[VAR_N] ||
213  vars_w[VAR_T] || vars_h[VAR_T] ||
214  vars_w[VAR_S2R_MAIN_N] || vars_h[VAR_S2R_MAIN_N] ||
215  vars_w[VAR_S2R_MAIN_T] || vars_h[VAR_S2R_MAIN_T])) {
216  av_log(ctx, AV_LOG_ERROR, "Expressions with frame variables 'n', 't', are not valid in init eval_mode.\n");
217  return AVERROR(EINVAL);
218  }
219 
220  return 0;
221 }
222 
223 static int nppscale_parse_expr(AVFilterContext* ctx, char* str_expr,
224  AVExpr** pexpr_ptr, const char* var,
225  const char* args)
226 {
227  NPPScaleContext* scale = ctx->priv;
228  int ret, is_inited = 0;
229  char* old_str_expr = NULL;
230  AVExpr* old_pexpr = NULL;
231 
232  if (str_expr) {
233  old_str_expr = av_strdup(str_expr);
234  if (!old_str_expr)
235  return AVERROR(ENOMEM);
236  av_opt_set(scale, var, args, 0);
237  }
238 
239  if (*pexpr_ptr) {
240  old_pexpr = *pexpr_ptr;
241  *pexpr_ptr = NULL;
242  is_inited = 1;
243  }
244 
245  ret = av_expr_parse(pexpr_ptr, args, var_names, NULL, NULL, NULL, NULL, 0,
246  ctx);
247  if (ret < 0) {
248  av_log(ctx, AV_LOG_ERROR, "Cannot parse expression for %s: '%s'\n", var,
249  args);
250  goto revert;
251  }
252 
253  ret = check_exprs(ctx);
254  if (ret < 0)
255  goto revert;
256 
257  if (is_inited && (ret = config_props(ctx->outputs[0])) < 0)
258  goto revert;
259 
260  av_expr_free(old_pexpr);
261  old_pexpr = NULL;
262  av_freep(&old_str_expr);
263 
264  return 0;
265 
266 revert:
267  av_expr_free(*pexpr_ptr);
268  *pexpr_ptr = NULL;
269  if (old_str_expr) {
270  av_opt_set(scale, var, old_str_expr, 0);
271  av_free(old_str_expr);
272  }
273  if (old_pexpr)
274  *pexpr_ptr = old_pexpr;
275 
276  return ret;
277 }
278 
279 static av_cold int nppscale_init(AVFilterContext* ctx)
280 {
281  NPPScaleContext* scale = ctx->priv;
282  int i, ret;
283 
284  if (!strcmp(scale->format_str, "same")) {
285  scale->format = AV_PIX_FMT_NONE;
286  } else {
287  scale->format = av_get_pix_fmt(scale->format_str);
288  if (scale->format == AV_PIX_FMT_NONE) {
289  av_log(ctx, AV_LOG_ERROR, "Unrecognized pixel format: %s\n", scale->format_str);
290  return AVERROR(EINVAL);
291  }
292  }
293 
294  if (scale->size_str && (scale->w_expr || scale->h_expr)) {
295  av_log(ctx, AV_LOG_ERROR,
296  "Size and width/height exprs cannot be set at the same time.\n");
297  return AVERROR(EINVAL);
298  }
299 
300  if (scale->w_expr && !scale->h_expr)
301  FFSWAP(char*, scale->w_expr, scale->size_str);
302 
303  if (scale->size_str) {
304  char buf[32];
305  ret = av_parse_video_size(&scale->w, &scale->h, scale->size_str);
306  if (0 > ret) {
307  av_log(ctx, AV_LOG_ERROR, "Invalid size '%s'\n", scale->size_str);
308  return ret;
309  }
310 
311  snprintf(buf, sizeof(buf) - 1, "%d", scale->w);
312  ret = av_opt_set(scale, "w", buf, 0);
313  if (ret < 0)
314  return ret;
315 
316  snprintf(buf, sizeof(buf) - 1, "%d", scale->h);
317  ret = av_opt_set(scale, "h", buf, 0);
318  if (ret < 0)
319  return ret;
320  }
321 
322  if (!scale->w_expr) {
323  ret = av_opt_set(scale, "w", "iw", 0);
324  if (ret < 0)
325  return ret;
326  }
327 
328  if (!scale->h_expr) {
329  ret = av_opt_set(scale, "h", "ih", 0);
330  if (ret < 0)
331  return ret;
332  }
333 
334  ret = nppscale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr);
335  if (ret < 0)
336  return ret;
337 
338  ret = nppscale_parse_expr(ctx, NULL, &scale->h_pexpr, "height", scale->h_expr);
339  if (ret < 0)
340  return ret;
341 
342  for (i = 0; i < FF_ARRAY_ELEMS(scale->stages); i++) {
343  scale->stages[i].frame = av_frame_alloc();
344  if (!scale->stages[i].frame)
345  return AVERROR(ENOMEM);
346  }
347  scale->tmp_frame = av_frame_alloc();
348  if (!scale->tmp_frame)
349  return AVERROR(ENOMEM);
350 
351  return 0;
352 }
353 
354 static int nppscale_eval_dimensions(AVFilterContext* ctx)
355 {
356  NPPScaleContext* scale = ctx->priv;
357  const char scale2ref = IS_SCALE2REF(ctx);
358  const AVFilterLink* inlink = ctx->inputs[scale2ref ? 1 : 0];
359  char* expr;
360  int eval_w, eval_h;
361  int ret;
362  double res;
363 
364  scale->var_values[VAR_IN_W] = scale->var_values[VAR_IW] = inlink->w;
365  scale->var_values[VAR_IN_H] = scale->var_values[VAR_IH] = inlink->h;
366  scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = NAN;
367  scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = NAN;
368  scale->var_values[VAR_A] = (double)inlink->w / inlink->h;
369  scale->var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
370  (double)inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
371  scale->var_values[VAR_DAR] = scale->var_values[VAR_A] * scale->var_values[VAR_SAR];
372 
373  if (scale2ref) {
374  const AVFilterLink* main_link = ctx->inputs[0];
375 
376  scale->var_values[VAR_S2R_MAIN_W] = main_link->w;
377  scale->var_values[VAR_S2R_MAIN_H] = main_link->h;
378  scale->var_values[VAR_S2R_MAIN_A] = (double)main_link->w / main_link->h;
379  scale->var_values[VAR_S2R_MAIN_SAR] = main_link->sample_aspect_ratio.num ?
380  (double)main_link->sample_aspect_ratio.num / main_link->sample_aspect_ratio.den : 1;
381  scale->var_values[VAR_S2R_MAIN_DAR] = scale->var_values[VAR_S2R_MDAR] =
382  scale->var_values[VAR_S2R_MAIN_A] * scale->var_values[VAR_S2R_MAIN_SAR];
383  }
384 
385  res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
386  eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int)res == 0 ? inlink->w : (int)res;
387 
388  res = av_expr_eval(scale->h_pexpr, scale->var_values, NULL);
389  if (isnan(res)) {
390  expr = scale->h_expr;
391  ret = AVERROR(EINVAL);
392  goto fail;
393  }
394  eval_h = scale->var_values[VAR_OUT_H] = scale->var_values[VAR_OH] = (int)res == 0 ? inlink->h : (int)res;
395 
396  res = av_expr_eval(scale->w_pexpr, scale->var_values, NULL);
397  if (isnan(res)) {
398  expr = scale->w_expr;
399  ret = AVERROR(EINVAL);
400  goto fail;
401  }
402  eval_w = scale->var_values[VAR_OUT_W] = scale->var_values[VAR_OW] = (int)res == 0 ? inlink->w : (int)res;
403 
404  scale->w = eval_w;
405  scale->h = eval_h;
406 
407  return 0;
408 
409 fail:
410  av_log(ctx, AV_LOG_ERROR, "Error when evaluating the expression '%s'.\n",
411  expr);
412  return ret;
413 }
414 
415 static void nppscale_uninit(AVFilterContext *ctx)
416 {
417  NPPScaleContext *s = ctx->priv;
418  int i;
419 
420  for (i = 0; i < FF_ARRAY_ELEMS(s->stages); i++) {
421  av_frame_free(&s->stages[i].frame);
422  av_buffer_unref(&s->stages[i].frames_ctx);
423  }
424  av_frame_free(&s->tmp_frame);
425 
426  av_expr_free(s->w_pexpr);
427  av_expr_free(s->h_pexpr);
428  s->w_pexpr = s->h_pexpr = NULL;
429 }
430 
431 static int init_stage(NPPScaleStageContext *stage, AVBufferRef *device_ctx)
432 {
433  AVBufferRef *out_ref = NULL;
434  AVHWFramesContext *out_ctx;
435  int in_sw, in_sh, out_sw, out_sh;
436  int ret, i;
437 
438  av_pix_fmt_get_chroma_sub_sample(stage->in_fmt, &in_sw, &in_sh);
439  av_pix_fmt_get_chroma_sub_sample(stage->out_fmt, &out_sw, &out_sh);
440  if (!stage->planes_out[0].width) {
441  stage->planes_out[0].width = stage->planes_in[0].width;
442  stage->planes_out[0].height = stage->planes_in[0].height;
443  }
444 
445  for (i = 1; i < FF_ARRAY_ELEMS(stage->planes_in); i++) {
446  stage->planes_in[i].width = stage->planes_in[0].width >> in_sw;
447  stage->planes_in[i].height = stage->planes_in[0].height >> in_sh;
448  stage->planes_out[i].width = stage->planes_out[0].width >> out_sw;
449  stage->planes_out[i].height = stage->planes_out[0].height >> out_sh;
450  }
451 
452  if (AV_PIX_FMT_YUVA420P == stage->in_fmt) {
453  stage->planes_in[3].width = stage->planes_in[0].width;
454  stage->planes_in[3].height = stage->planes_in[0].height;
455  stage->planes_out[3].width = stage->planes_out[0].width;
456  stage->planes_out[3].height = stage->planes_out[0].height;
457  }
458 
459  out_ref = av_hwframe_ctx_alloc(device_ctx);
460  if (!out_ref)
461  return AVERROR(ENOMEM);
462  out_ctx = (AVHWFramesContext*)out_ref->data;
463 
464  out_ctx->format = AV_PIX_FMT_CUDA;
465  out_ctx->sw_format = stage->out_fmt;
466  out_ctx->width = FFALIGN(stage->planes_out[0].width, 32);
467  out_ctx->height = FFALIGN(stage->planes_out[0].height, 32);
468 
469  ret = av_hwframe_ctx_init(out_ref);
470  if (ret < 0)
471  goto fail;
472 
473  av_frame_unref(stage->frame);
474  ret = av_hwframe_get_buffer(out_ref, stage->frame, 0);
475  if (ret < 0)
476  goto fail;
477 
478  stage->frame->width = stage->planes_out[0].width;
479  stage->frame->height = stage->planes_out[0].height;
480 
481  av_buffer_unref(&stage->frames_ctx);
482  stage->frames_ctx = out_ref;
483 
484  return 0;
485 fail:
486  av_buffer_unref(&out_ref);
487  return ret;
488 }
489 
490 static int format_is_supported(enum AVPixelFormat fmt)
491 {
492  int i;
493 
494  for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++)
495  if (supported_formats[i] == fmt)
496  return 1;
497  return 0;
498 }
499 
500 static enum AVPixelFormat get_deinterleaved_format(enum AVPixelFormat fmt)
501 {
502  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
503  int i, planes;
504 
505  planes = av_pix_fmt_count_planes(fmt);
506  if (planes == desc->nb_components)
507  return fmt;
508  for (i = 0; i < FF_ARRAY_ELEMS(deinterleaved_formats); i++)
509  if (deinterleaved_formats[i][0] == fmt)
510  return deinterleaved_formats[i][1];
511  return AV_PIX_FMT_NONE;
512 }
513 
514 static int init_processing_chain(AVFilterContext *ctx, int in_width, int in_height,
515  int out_width, int out_height)
516 {
517  NPPScaleContext *s = ctx->priv;
518  FilterLink *inl = ff_filter_link(ctx->inputs[0]);
519  FilterLink *outl = ff_filter_link(ctx->outputs[0]);
520 
521  AVHWFramesContext *in_frames_ctx;
522 
523  enum AVPixelFormat in_format;
524  enum AVPixelFormat out_format;
525  enum AVPixelFormat in_deinterleaved_format;
526  enum AVPixelFormat out_deinterleaved_format;
527 
528  int i, ret, last_stage = -1;
529 
530  /* check that we have a hw context */
531  if (!inl->hw_frames_ctx) {
532  av_log(ctx, AV_LOG_ERROR, "No hw context provided on input\n");
533  return AVERROR(EINVAL);
534  }
535  in_frames_ctx = (AVHWFramesContext*)inl->hw_frames_ctx->data;
536  in_format = in_frames_ctx->sw_format;
537  out_format = (s->format == AV_PIX_FMT_NONE) ? in_format : s->format;
538 
539  if (!format_is_supported(in_format)) {
540  av_log(ctx, AV_LOG_ERROR, "Unsupported input format: %s\n",
541  av_get_pix_fmt_name(in_format));
542  return AVERROR(ENOSYS);
543  }
544  if (!format_is_supported(out_format)) {
545  av_log(ctx, AV_LOG_ERROR, "Unsupported output format: %s\n",
546  av_get_pix_fmt_name(out_format));
547  return AVERROR(ENOSYS);
548  }
549 
550  in_deinterleaved_format = get_deinterleaved_format(in_format);
551  out_deinterleaved_format = get_deinterleaved_format(out_format);
552  if (in_deinterleaved_format == AV_PIX_FMT_NONE ||
553  out_deinterleaved_format == AV_PIX_FMT_NONE)
554  return AVERROR_BUG;
555 
556  /* figure out which stages need to be done */
557  if (in_width != out_width || in_height != out_height ||
558  in_deinterleaved_format != out_deinterleaved_format) {
559  s->stages[STAGE_RESIZE].stage_needed = 1;
560 
561  if (s->interp_algo == NPPI_INTER_SUPER &&
562  (out_width > in_width && out_height > in_height)) {
563  s->interp_algo = NPPI_INTER_LANCZOS;
564  av_log(ctx, AV_LOG_WARNING, "super-sampling not supported for output dimensions, using lanczos instead.\n");
565  }
566  if (s->interp_algo == NPPI_INTER_SUPER &&
567  !(out_width < in_width && out_height < in_height)) {
568  s->interp_algo = NPPI_INTER_CUBIC;
569  av_log(ctx, AV_LOG_WARNING, "super-sampling not supported for output dimensions, using cubic instead.\n");
570  }
571  }
572 
573  if (!s->stages[STAGE_RESIZE].stage_needed && in_format == out_format)
574  s->passthrough = 1;
575 
576  if (!s->passthrough) {
577  if (in_format != in_deinterleaved_format)
578  s->stages[STAGE_DEINTERLEAVE].stage_needed = 1;
579  if (out_format != out_deinterleaved_format)
580  s->stages[STAGE_INTERLEAVE].stage_needed = 1;
581  }
582 
583  s->stages[STAGE_DEINTERLEAVE].in_fmt = in_format;
584  s->stages[STAGE_DEINTERLEAVE].out_fmt = in_deinterleaved_format;
585  s->stages[STAGE_DEINTERLEAVE].planes_in[0].width = in_width;
586  s->stages[STAGE_DEINTERLEAVE].planes_in[0].height = in_height;
587 
588  s->stages[STAGE_RESIZE].in_fmt = in_deinterleaved_format;
589  s->stages[STAGE_RESIZE].out_fmt = out_deinterleaved_format;
590  s->stages[STAGE_RESIZE].planes_in[0].width = in_width;
591  s->stages[STAGE_RESIZE].planes_in[0].height = in_height;
592  s->stages[STAGE_RESIZE].planes_out[0].width = out_width;
593  s->stages[STAGE_RESIZE].planes_out[0].height = out_height;
594 
595  s->stages[STAGE_INTERLEAVE].in_fmt = out_deinterleaved_format;
596  s->stages[STAGE_INTERLEAVE].out_fmt = out_format;
597  s->stages[STAGE_INTERLEAVE].planes_in[0].width = out_width;
598  s->stages[STAGE_INTERLEAVE].planes_in[0].height = out_height;
599 
600  /* init the hardware contexts */
601  for (i = 0; i < FF_ARRAY_ELEMS(s->stages); i++) {
602  if (!s->stages[i].stage_needed)
603  continue;
604 
605  ret = init_stage(&s->stages[i], in_frames_ctx->device_ref);
606  if (ret < 0)
607  return ret;
608 
609  last_stage = i;
610  }
611 
612  if (last_stage >= 0)
613  outl->hw_frames_ctx = av_buffer_ref(s->stages[last_stage].frames_ctx);
614  else
615  outl->hw_frames_ctx = av_buffer_ref(inl->hw_frames_ctx);
616 
617  if (!outl->hw_frames_ctx)
618  return AVERROR(ENOMEM);
619 
620  return 0;
621 }
622 
623 static int config_props(AVFilterLink *outlink)
624 {
625  AVFilterContext *ctx = outlink->src;
626  AVFilterLink *inlink0 = outlink->src->inputs[0];
627  AVFilterLink *inlink = IS_SCALE2REF(ctx) ?
628  outlink->src->inputs[1] :
629  outlink->src->inputs[0];
630  NPPScaleContext *s = ctx->priv;
631  double w_adj = 1.0;
632  int ret;
633 
634  if ((ret = nppscale_eval_dimensions(ctx)) < 0)
635  goto fail;
636 
637  if (s->reset_sar)
638  w_adj = IS_SCALE2REF(ctx) ? s->var_values[VAR_S2R_MAIN_SAR] :
639  s->var_values[VAR_SAR];
640 
641  ff_scale_adjust_dimensions(inlink, &s->w, &s->h,
642  s->force_original_aspect_ratio,
643  s->force_divisible_by, w_adj);
644 
645  if (s->w > INT_MAX || s->h > INT_MAX ||
646  (s->h * inlink->w) > INT_MAX ||
647  (s->w * inlink->h) > INT_MAX)
648  av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");
649 
650  outlink->w = s->w;
651  outlink->h = s->h;
652 
653  ret = init_processing_chain(ctx, inlink0->w, inlink0->h, outlink->w, outlink->h);
654  if (ret < 0)
655  return ret;
656 
657  av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d\n",
658  inlink->w, inlink->h, outlink->w, outlink->h);
659 
660  if (s->reset_sar)
661  outlink->sample_aspect_ratio = (AVRational){1, 1};
662  else if (inlink->sample_aspect_ratio.num)
663  outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h*inlink->w,
664  outlink->w*inlink->h},
665  inlink->sample_aspect_ratio);
666  else
667  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
668 
669  return 0;
670 
671 fail:
672  return ret;
673 }
674 
675 static int config_props_ref(AVFilterLink *outlink)
676 {
677  FilterLink *outl = ff_filter_link(outlink);
678  AVFilterLink *inlink = outlink->src->inputs[1];
679  FilterLink *inl = ff_filter_link(inlink);
680  FilterLink *ol = ff_filter_link(outlink);
681 
682  outlink->w = inlink->w;
683  outlink->h = inlink->h;
684  outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
685  outlink->time_base = inlink->time_base;
686  ol->frame_rate = inl->frame_rate;
687 
688  outl->hw_frames_ctx = av_buffer_ref(inl->hw_frames_ctx);
689 
690  return 0;
691 }
692 
693 static int nppscale_deinterleave(AVFilterContext *ctx, NPPScaleStageContext *stage,
694  AVFrame *out, AVFrame *in)
695 {
696  AVHWFramesContext *in_frames_ctx = (AVHWFramesContext*)in->hw_frames_ctx->data;
697  NppStatus err;
698 
699  switch (in_frames_ctx->sw_format) {
700  case AV_PIX_FMT_NV12:
701  err = nppiYCbCr420_8u_P2P3R(in->data[0], in->linesize[0],
702  in->data[1], in->linesize[1],
703  out->data, out->linesize,
704  (NppiSize){ in->width, in->height });
705  break;
706  default:
707  return AVERROR_BUG;
708  }
709  if (err != NPP_SUCCESS) {
710  av_log(ctx, AV_LOG_ERROR, "NPP deinterleave error: %d\n", err);
711  return AVERROR_UNKNOWN;
712  }
713 
714  return 0;
715 }
716 
717 static int nppscale_resize(AVFilterContext *ctx, NPPScaleStageContext *stage,
718  AVFrame *out, AVFrame *in)
719 {
720  NPPScaleContext *s = ctx->priv;
721  NppStatus err;
722  int i;
723 
724  for (i = 0; i < FF_ARRAY_ELEMS(stage->planes_in) && i < FF_ARRAY_ELEMS(in->data) && in->data[i]; i++) {
725  int iw = stage->planes_in[i].width;
726  int ih = stage->planes_in[i].height;
727  int ow = stage->planes_out[i].width;
728  int oh = stage->planes_out[i].height;
729 
730  err = nppiResizeSqrPixel_8u_C1R(in->data[i], (NppiSize){ iw, ih },
731  in->linesize[i], (NppiRect){ 0, 0, iw, ih },
732  out->data[i], out->linesize[i],
733  (NppiRect){ 0, 0, ow, oh },
734  (double)ow / iw, (double)oh / ih,
735  0.0, 0.0, s->interp_algo);
736  if (err != NPP_SUCCESS) {
737  av_log(ctx, AV_LOG_ERROR, "NPP resize error: %d\n", err);
738  return AVERROR_UNKNOWN;
739  }
740  }
741 
742  return 0;
743 }
744 
745 static int nppscale_interleave(AVFilterContext *ctx, NPPScaleStageContext *stage,
746  AVFrame *out, AVFrame *in)
747 {
748  AVHWFramesContext *out_frames_ctx = (AVHWFramesContext*)out->hw_frames_ctx->data;
749  NppStatus err;
750 
751  switch (out_frames_ctx->sw_format) {
752  case AV_PIX_FMT_NV12:
753  err = nppiYCbCr420_8u_P3P2R((const uint8_t**)in->data,
754  in->linesize,
755  out->data[0], out->linesize[0],
756  out->data[1], out->linesize[1],
757  (NppiSize){ in->width, in->height });
758  break;
759  default:
760  return AVERROR_BUG;
761  }
762  if (err != NPP_SUCCESS) {
763  av_log(ctx, AV_LOG_ERROR, "NPP interleave error: %d\n", err);
764  return AVERROR_UNKNOWN;
765  }
766 
767  return 0;
768 }
769 
770 static int (*const nppscale_process[])(AVFilterContext *ctx, NPPScaleStageContext *stage,
771  AVFrame *out, AVFrame *in) = {
772  [STAGE_DEINTERLEAVE] = nppscale_deinterleave,
773  [STAGE_RESIZE] = nppscale_resize,
774  [STAGE_INTERLEAVE] = nppscale_interleave,
775 };
776 
777 static int nppscale_scale(AVFilterLink *link, AVFrame *out, AVFrame *in)
778 {
779  FilterLink *inl = ff_filter_link(link);
780  AVFilterContext *ctx = link->dst;
781  NPPScaleContext *s = ctx->priv;
782  AVFilterLink *outlink = ctx->outputs[0];
783  AVFrame *src = in;
784  char buf[32];
785  int i, ret, last_stage = -1;
786  int frame_changed;
787 
788  frame_changed = in->width != link->w ||
789  in->height != link->h ||
790  in->format != link->format ||
791  in->sample_aspect_ratio.den != link->sample_aspect_ratio.den ||
792  in->sample_aspect_ratio.num != link->sample_aspect_ratio.num;
793 
794  if (s->eval_mode == EVAL_MODE_FRAME || frame_changed) {
795  unsigned vars_w[VARS_NB] = { 0 }, vars_h[VARS_NB] = { 0 };
796 
797  av_expr_count_vars(s->w_pexpr, vars_w, VARS_NB);
798  av_expr_count_vars(s->h_pexpr, vars_h, VARS_NB);
799 
800  if (s->eval_mode == EVAL_MODE_FRAME && !frame_changed && !IS_SCALE2REF(ctx) &&
801  !(vars_w[VAR_N] || vars_w[VAR_T]) &&
802  !(vars_h[VAR_N] || vars_h[VAR_T]) && s->w && s->h)
803  goto scale;
804 
805  if (s->eval_mode == EVAL_MODE_INIT) {
806  snprintf(buf, sizeof(buf)-1, "%d", outlink->w);
807  av_opt_set(s, "w", buf, 0);
808  snprintf(buf, sizeof(buf)-1, "%d", outlink->h);
809  av_opt_set(s, "h", buf, 0);
810 
811  ret = nppscale_parse_expr(ctx, NULL, &s->w_pexpr, "width", s->w_expr);
812  if (ret < 0)
813  return ret;
814 
815  ret = nppscale_parse_expr(ctx, NULL, &s->h_pexpr, "height", s->h_expr);
816  if (ret < 0)
817  return ret;
818  }
819 
820  if (IS_SCALE2REF(ctx)) {
821  s->var_values[VAR_S2R_MAIN_N] = inl->frame_count_out;
822  s->var_values[VAR_S2R_MAIN_T] = TS2T(in->pts, link->time_base);
823  } else {
824  s->var_values[VAR_N] = inl->frame_count_out;
825  s->var_values[VAR_T] = TS2T(in->pts, link->time_base);
826  }
827 
828  link->format = in->format;
829  link->w = in->width;
830  link->h = in->height;
831 
832  link->sample_aspect_ratio.den = in->sample_aspect_ratio.den;
833  link->sample_aspect_ratio.num = in->sample_aspect_ratio.num;
834 
835  if ((ret = config_props(outlink)) < 0)
836  return ret;
837  }
838 
839 scale:
840  for (i = 0; i < FF_ARRAY_ELEMS(s->stages); i++) {
841  if (!s->stages[i].stage_needed)
842  continue;
843 
844  ret = nppscale_process[i](ctx, &s->stages[i], s->stages[i].frame, src);
845  if (ret < 0)
846  return ret;
847 
848  src = s->stages[i].frame;
849  last_stage = i;
850  }
851  if (last_stage < 0)
852  return AVERROR_BUG;
853 
854  ret = av_hwframe_get_buffer(src->hw_frames_ctx, s->tmp_frame, 0);
855  if (ret < 0)
856  return ret;
857 
858  s->tmp_frame->width = src->width;
859  s->tmp_frame->height = src->height;
860 
861  av_frame_move_ref(out, src);
862  av_frame_move_ref(src, s->tmp_frame);
863 
864  ret = av_frame_copy_props(out, in);
865  if (ret < 0)
866  return ret;
867 
868  if (out->width != in->width || out->height != in->height) {
869  av_frame_side_data_remove_by_props(&out->side_data, &out->nb_side_data,
870  AV_SIDE_DATA_PROP_SIZE_DEPENDENT);
871  }
872 
873  return 0;
874 }
875 
876 static int nppscale_filter_frame(AVFilterLink *link, AVFrame *in)
877 {
878  AVFilterContext *ctx = link->dst;
879  NPPScaleContext *s = ctx->priv;
880  AVFilterLink *outlink = ctx->outputs[0];
881  FilterLink *l = ff_filter_link(outlink);
882  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)l->hw_frames_ctx->data;
883  AVCUDADeviceContext *device_hwctx = frames_ctx->device_ctx->hwctx;
884 
885  AVFrame *out = NULL;
886  CUcontext dummy;
887  int ret = 0;
888 
889  if (s->passthrough)
890  return ff_filter_frame(outlink, in);
891 
892  out = av_frame_alloc();
893  if (!out) {
894  ret = AVERROR(ENOMEM);
895  goto fail;
896  }
897 
898  ret = CHECK_CU(device_hwctx->internal->cuda_dl->cuCtxPushCurrent(device_hwctx->cuda_ctx));
899  if (ret < 0)
900  goto fail;
901 
902  ret = nppscale_scale(link, out, in);
903 
904  CHECK_CU(device_hwctx->internal->cuda_dl->cuCtxPopCurrent(&dummy));
905  if (ret < 0)
906  goto fail;
907 
908  av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
909  (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
910  (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
911  INT_MAX);
912 
913  av_frame_free(&in);
914  return ff_filter_frame(outlink, out);
915 fail:
916  av_frame_free(&in);
917  av_frame_free(&out);
918  return ret;
919 }
920 
921 static int nppscale_filter_frame_ref(AVFilterLink *link, AVFrame *in)
922 {
923  FilterLink *inl = ff_filter_link(link);
924  NPPScaleContext *scale = link->dst->priv;
925  AVFilterLink *outlink = link->dst->outputs[1];
926  int frame_changed;
927 
928  frame_changed = in->width != link->w ||
929  in->height != link->h ||
930  in->format != link->format ||
931  in->sample_aspect_ratio.den != link->sample_aspect_ratio.den ||
932  in->sample_aspect_ratio.num != link->sample_aspect_ratio.num;
933 
934  if (frame_changed) {
935  link->format = in->format;
936  link->w = in->width;
937  link->h = in->height;
938  link->sample_aspect_ratio.den = in->sample_aspect_ratio.den;
939  link->sample_aspect_ratio.num = in->sample_aspect_ratio.num;
940 
941  config_props_ref(outlink);
942  }
943 
944  if (scale->eval_mode == EVAL_MODE_FRAME) {
945  scale->var_values[VAR_N] = inl->frame_count_out;
946  scale->var_values[VAR_T] = TS2T(in->pts, link->time_base);
947  }
948 
949  return ff_filter_frame(outlink, in);
950 }
951 
952 static int request_frame(AVFilterLink *outlink)
953 {
954  return ff_request_frame(outlink->src->inputs[0]);
955 }
956 
957 static int request_frame_ref(AVFilterLink *outlink)
958 {
959  return ff_request_frame(outlink->src->inputs[1]);
960 }
961 
962 #define OFFSET(x) offsetof(NPPScaleContext, x)
963 #define FLAGS (AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM)
964 static const AVOption options[] = {
965  { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
966  { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, .flags = FLAGS },
967  { "format", "Output pixel format", OFFSET(format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },
968  { "s", "Output video size", OFFSET(size_str), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = FLAGS },
969 
970  { "interp_algo", "Interpolation algorithm used for resizing", OFFSET(interp_algo), AV_OPT_TYPE_INT, { .i64 = NPPI_INTER_CUBIC }, 0, INT_MAX, FLAGS, .unit = "interp_algo" },
971  { "nn", "nearest neighbour", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_NN }, 0, 0, FLAGS, .unit = "interp_algo" },
972  { "linear", "linear", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_LINEAR }, 0, 0, FLAGS, .unit = "interp_algo" },
973  { "cubic", "cubic", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_CUBIC }, 0, 0, FLAGS, .unit = "interp_algo" },
974  { "cubic2p_bspline", "2-parameter cubic (B=1, C=0)", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_CUBIC2P_BSPLINE }, 0, 0, FLAGS, .unit = "interp_algo" },
975  { "cubic2p_catmullrom", "2-parameter cubic (B=0, C=1/2)", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_CUBIC2P_CATMULLROM }, 0, 0, FLAGS, .unit = "interp_algo" },
976  { "cubic2p_b05c03", "2-parameter cubic (B=1/2, C=3/10)", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_CUBIC2P_B05C03 }, 0, 0, FLAGS, .unit = "interp_algo" },
977  { "super", "supersampling", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_SUPER }, 0, 0, FLAGS, .unit = "interp_algo" },
978  { "lanczos", "Lanczos", 0, AV_OPT_TYPE_CONST, { .i64 = NPPI_INTER_LANCZOS }, 0, 0, FLAGS, .unit = "interp_algo" },
979  { "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 2, FLAGS, .unit = "force_oar" },
980  { "disable", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, .unit = "force_oar" },
981  { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, .unit = "force_oar" },
982  { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, .unit = "force_oar" },
983  { "force_divisible_by", "enforce that the output resolution is divisible by a defined integer when force_original_aspect_ratio is used", OFFSET(force_divisible_by), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 256, FLAGS },
984  { "reset_sar", "reset SAR to 1 and scale to square pixels if scaling proportionally", OFFSET(reset_sar), AV_OPT_TYPE_BOOL, { .i64 = 0}, 0, 1, FLAGS },
985  { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, { .i64 = EVAL_MODE_INIT }, 0, EVAL_MODE_NB-1, FLAGS, .unit = "eval" },
986  { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, { .i64 = EVAL_MODE_INIT }, 0, 0, FLAGS, .unit = "eval" },
987  { "frame", "eval expressions during initialization and per-frame", 0, AV_OPT_TYPE_CONST, { .i64 = EVAL_MODE_FRAME }, 0, 0, FLAGS, .unit = "eval" },
988  { NULL },
989 };
990 
991 static const AVClass nppscale_class = {
992  .class_name = "nppscale",
993  .item_name = av_default_item_name,
994  .option = options,
995  .version = LIBAVUTIL_VERSION_INT,
996  .category = AV_CLASS_CATEGORY_FILTER,
997 };
998 
999 static const AVFilterPad nppscale_inputs[] = {
1000  {
1001  .name = "default",
1002  .type = AVMEDIA_TYPE_VIDEO,
1003  .filter_frame = nppscale_filter_frame,
1004  }
1005 };
1006 
1007 static const AVFilterPad nppscale_outputs[] = {
1008  {
1009  .name = "default",
1010  .type = AVMEDIA_TYPE_VIDEO,
1011  .config_props = config_props,
1012  }
1013 };
1014 
1015 const FFFilter ff_vf_scale_npp = {
1016  .p.name = "scale_npp",
1017  .p.description = NULL_IF_CONFIG_SMALL("NVIDIA Performance Primitives video "
1018  "scaling and format conversion"),
1019  .p.priv_class = &nppscale_class,
1020 
1021  .init = nppscale_init,
1022  .uninit = nppscale_uninit,
1023 
1024  .priv_size = sizeof(NPPScaleContext),
1025 
1026  FILTER_INPUTS(nppscale_inputs),
1027  FILTER_OUTPUTS(nppscale_outputs),
1028 
1029  FILTER_SINGLE_PIXFMT(AV_PIX_FMT_CUDA),
1030 
1031  .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
1032 };
1033 
1034 static const AVFilterPad nppscale2ref_inputs[] = {
1035  {
1036  .name = "default",
1037  .type = AVMEDIA_TYPE_VIDEO,
1038  .filter_frame = nppscale_filter_frame,
1039  },
1040  {
1041  .name = "ref",
1042  .type = AVMEDIA_TYPE_VIDEO,
1043  .filter_frame = nppscale_filter_frame_ref,
1044  }
1045 };
1046 
1047 static const AVFilterPad nppscale2ref_outputs[] = {
1048  {
1049  .name = "default",
1050  .type = AVMEDIA_TYPE_VIDEO,
1051  .config_props = config_props,
1052  .request_frame= request_frame,
1053  },
1054  {
1055  .name = "ref",
1056  .type = AVMEDIA_TYPE_VIDEO,
1057  .config_props = config_props_ref,
1058  .request_frame= request_frame_ref,
1059  }
1060 };
1061 
1062 const FFFilter ff_vf_scale2ref_npp = {
1063  .p.name = "scale2ref_npp",
1064  .p.description = NULL_IF_CONFIG_SMALL("NVIDIA Performance Primitives video "
1065  "scaling and format conversion to the "
1066  "given reference."),
1067  .p.priv_class = &nppscale_class,
1068 
1069  .init = nppscale_init,
1070  .uninit = nppscale_uninit,
1071 
1072  .priv_size = sizeof(NPPScaleContext),
1073 
1074  FILTER_INPUTS(nppscale2ref_inputs),
1075  FILTER_OUTPUTS(nppscale2ref_outputs),
1076 
1077  FILTER_SINGLE_PIXFMT(AV_PIX_FMT_CUDA),
1078 
1079  .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
1080 };
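Example usage. The filter operates only on AV_PIX_FMT_CUDA hardware frames (see FILTER_SINGLE_PIXFMT above), so frames must already live in GPU memory, typically by decoding with -hwaccel cuda and keeping the output on the device. The following command line is a minimal sketch, not part of the source file; the input and output names and the h264_nvenc encoder are placeholders:

    ffmpeg -hwaccel cuda -hwaccel_output_format cuda -i input.mp4 \
           -vf "scale_npp=w=1280:h=720:format=nv12:interp_algo=lanczos" \
           -c:v h264_nvenc output.mp4

The option names map directly to the AVOption table above: w and h take the same expressions parsed by nppscale_parse_expr (for example w=iw/2), format selects the output software format (or "same" for no conversion), and interp_algo picks the NPP interpolation constant (nn, linear, cubic, super, lanczos, and the cubic2p variants).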