FFmpeg
af_aiir.c
1 /*
2  * Copyright (c) 2018 Paul B Mahol
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <float.h>
22 
23 #include "libavutil/avstring.h"
24 #include "libavutil/intreadwrite.h"
25 #include "libavutil/mem.h"
26 #include "libavutil/opt.h"
27 #include "libavutil/xga_font_data.h"
28 #include "audio.h"
29 #include "avfilter.h"
30 #include "filters.h"
31 #include "formats.h"
32 #include "video.h"
33 
34 typedef struct ThreadData {
35  AVFrame *in, *out;
36 } ThreadData;
37 
38 typedef struct Pair {
39  int a, b;
40 } Pair;
41 
42 typedef struct BiquadContext {
43  double a[3];
44  double b[3];
45  double w1, w2;
46 } BiquadContext;
47 
48 typedef struct IIRChannel {
49  int nb_ab[2];
50  double *ab[2];
51  double g;
52  double *cache[2];
53  double fir;
54  BiquadContext *biquads;
55  int clippings;
56 } IIRChannel;
57 
58 typedef struct AudioIIRContext {
59  const AVClass *class;
60  char *a_str, *b_str, *g_str;
61  double dry_gain, wet_gain;
62  double mix;
63  int normalize;
64  int format;
65  int process;
66  int precision;
67  int response;
68  int w, h;
69  int ir_channel;
70  AVRational rate;
71 
72  AVFrame *video;
73 
74  IIRChannel *iir;
75  int channels;
76  enum AVSampleFormat sample_format;
77 
78  int (*iir_channel)(AVFilterContext *ctx, void *arg, int ch, int nb_jobs);
79 } AudioIIRContext;
80 
81 static int query_formats(const AVFilterContext *ctx,
82  AVFilterFormatsConfig **cfg_in,
83  AVFilterFormatsConfig **cfg_out)
84 {
85  const AudioIIRContext *s = ctx->priv;
86  AVFilterFormats *formats;
87  enum AVSampleFormat sample_fmts[] = {
88  AV_SAMPLE_FMT_S16P,
89  AV_SAMPLE_FMT_NONE
90  };
91  static const enum AVPixelFormat pix_fmts[] = {
92  AV_PIX_FMT_RGB0,
93  AV_PIX_FMT_NONE
94  };
95  int ret;
96 
97  if (s->response) {
98  formats = ff_make_format_list(pix_fmts);
99  if ((ret = ff_formats_ref(formats, &cfg_out[1]->formats)) < 0)
100  return ret;
101  }
102 
103  sample_fmts[0] = s->sample_format;
104  ret = ff_set_common_formats_from_list2(ctx, cfg_in, cfg_out, sample_fmts);
105  if (ret < 0)
106  return ret;
107 
108  return 0;
109 }
110 
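/*
 * Direct processing (one channel per job): the transfer function
 *   H(z) = (b[0] + b[1]*z^-1 + ...) / (a[0] + a[1]*z^-1 + ...)
 * is evaluated sample by sample from the input history ic[] and the output
 * history oc[]:
 *   w = sum_k b[k]*x[n-k] - sum_{k>=1} a[k]*w[n-k]
 * The result is scaled by the channel gain g and the wet gain, then mixed
 * with the dry input; integer formats count and clamp clipped samples.
 */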
111 #define IIR_CH(name, type, min, max, need_clipping) \
112 static int iir_ch_## name(AVFilterContext *ctx, void *arg, int ch, int nb_jobs) \
113 { \
114  AudioIIRContext *s = ctx->priv; \
115  const double ig = s->dry_gain; \
116  const double og = s->wet_gain; \
117  const double mix = s->mix; \
118  ThreadData *td = arg; \
119  AVFrame *in = td->in, *out = td->out; \
120  const type *src = (const type *)in->extended_data[ch]; \
121  double *oc = (double *)s->iir[ch].cache[0]; \
122  double *ic = (double *)s->iir[ch].cache[1]; \
123  const int nb_a = s->iir[ch].nb_ab[0]; \
124  const int nb_b = s->iir[ch].nb_ab[1]; \
125  const double *a = s->iir[ch].ab[0]; \
126  const double *b = s->iir[ch].ab[1]; \
127  const double g = s->iir[ch].g; \
128  int *clippings = &s->iir[ch].clippings; \
129  type *dst = (type *)out->extended_data[ch]; \
130  int n; \
131  \
132  for (n = 0; n < in->nb_samples; n++) { \
133  double sample = 0.; \
134  int x; \
135  \
136  memmove(&ic[1], &ic[0], (nb_b - 1) * sizeof(*ic)); \
137  memmove(&oc[1], &oc[0], (nb_a - 1) * sizeof(*oc)); \
138  ic[0] = src[n] * ig; \
139  for (x = 0; x < nb_b; x++) \
140  sample += b[x] * ic[x]; \
141  \
142  for (x = 1; x < nb_a; x++) \
143  sample -= a[x] * oc[x]; \
144  \
145  oc[0] = sample; \
146  sample *= og * g; \
147  sample = sample * mix + ic[0] * (1. - mix); \
148  if (need_clipping && sample < min) { \
149  (*clippings)++; \
150  dst[n] = min; \
151  } else if (need_clipping && sample > max) { \
152  (*clippings)++; \
153  dst[n] = max; \
154  } else { \
155  dst[n] = sample; \
156  } \
157  } \
158  \
159  return 0; \
160 }
161 
162 IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
163 IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
164 IIR_CH(fltp, float, -1., 1., 0)
165 IIR_CH(dblp, double, -1., 1., 0)
166 
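/*
 * Serial (cascaded biquad) processing: the zeros/poles are grouped into
 * second-order sections which are applied one after another.  Each section
 * is evaluated in transposed direct form II with two state variables:
 *   y  = b0*x + w1
 *   w1 = b1*x + w2 - a1*y
 *   w2 = b2*x      - a2*y
 * (a1/a2 are stored negated, hence the additions below).
 */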
167 #define SERIAL_IIR_CH(name, type, min, max, need_clipping) \
168 static int iir_ch_serial_## name(AVFilterContext *ctx, void *arg, \
169  int ch, int nb_jobs) \
170 { \
171  AudioIIRContext *s = ctx->priv; \
172  const double ig = s->dry_gain; \
173  const double og = s->wet_gain; \
174  const double mix = s->mix; \
175  const double imix = 1. - mix; \
176  ThreadData *td = arg; \
177  AVFrame *in = td->in, *out = td->out; \
178  const type *src = (const type *)in->extended_data[ch]; \
179  type *dst = (type *)out->extended_data[ch]; \
180  IIRChannel *iir = &s->iir[ch]; \
181  const double g = iir->g; \
182  int *clippings = &iir->clippings; \
183  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2; \
184  int n, i; \
185  \
186  for (i = nb_biquads - 1; i >= 0; i--) { \
187  const double a1 = -iir->biquads[i].a[1]; \
188  const double a2 = -iir->biquads[i].a[2]; \
189  const double b0 = iir->biquads[i].b[0]; \
190  const double b1 = iir->biquads[i].b[1]; \
191  const double b2 = iir->biquads[i].b[2]; \
192  double w1 = iir->biquads[i].w1; \
193  double w2 = iir->biquads[i].w2; \
194  \
195  for (n = 0; n < in->nb_samples; n++) { \
196  double i0 = ig * (i ? dst[n] : src[n]); \
197  double o0 = i0 * b0 + w1; \
198  \
199  w1 = b1 * i0 + w2 + a1 * o0; \
200  w2 = b2 * i0 + a2 * o0; \
201  o0 *= og * g; \
202  \
203  o0 = o0 * mix + imix * i0; \
204  if (need_clipping && o0 < min) { \
205  (*clippings)++; \
206  dst[n] = min; \
207  } else if (need_clipping && o0 > max) { \
208  (*clippings)++; \
209  dst[n] = max; \
210  } else { \
211  dst[n] = o0; \
212  } \
213  } \
214  iir->biquads[i].w1 = w1; \
215  iir->biquads[i].w2 = w2; \
216  } \
217  \
218  return 0; \
219 }
220 
221 SERIAL_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
222 SERIAL_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
223 SERIAL_IIR_CH(fltp, float, -1., 1., 0)
224 SERIAL_IIR_CH(dblp, double, -1., 1., 0)
225 
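/*
 * Parallel processing: every second-order section filters the same input
 * and the section outputs are summed into dst[], followed by the direct
 * FIR term iir->fir * x[n].  This is the partial-fraction form produced by
 * convert_serial2parallel(), where each section has b0 == 0.
 */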
226 #define PARALLEL_IIR_CH(name, type, min, max, need_clipping) \
227 static int iir_ch_parallel_## name(AVFilterContext *ctx, void *arg, \
228  int ch, int nb_jobs) \
229 { \
230  AudioIIRContext *s = ctx->priv; \
231  const double ig = s->dry_gain; \
232  const double og = s->wet_gain; \
233  const double mix = s->mix; \
234  const double imix = 1. - mix; \
235  ThreadData *td = arg; \
236  AVFrame *in = td->in, *out = td->out; \
237  const type *src = (const type *)in->extended_data[ch]; \
238  type *dst = (type *)out->extended_data[ch]; \
239  IIRChannel *iir = &s->iir[ch]; \
240  const double g = iir->g; \
241  const double fir = iir->fir; \
242  int *clippings = &iir->clippings; \
243  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2; \
244  int n, i; \
245  \
246  for (i = 0; i < nb_biquads; i++) { \
247  const double a1 = -iir->biquads[i].a[1]; \
248  const double a2 = -iir->biquads[i].a[2]; \
249  const double b1 = iir->biquads[i].b[1]; \
250  const double b2 = iir->biquads[i].b[2]; \
251  double w1 = iir->biquads[i].w1; \
252  double w2 = iir->biquads[i].w2; \
253  \
254  for (n = 0; n < in->nb_samples; n++) { \
255  double i0 = ig * src[n]; \
256  double o0 = w1; \
257  \
258  w1 = b1 * i0 + w2 + a1 * o0; \
259  w2 = b2 * i0 + a2 * o0; \
260  o0 *= og * g; \
261  o0 += dst[n]; \
262  \
263  if (need_clipping && o0 < min) { \
264  (*clippings)++; \
265  dst[n] = min; \
266  } else if (need_clipping && o0 > max) { \
267  (*clippings)++; \
268  dst[n] = max; \
269  } else { \
270  dst[n] = o0; \
271  } \
272  } \
273  iir->biquads[i].w1 = w1; \
274  iir->biquads[i].w2 = w2; \
275  } \
276  \
277  for (n = 0; n < in->nb_samples; n++) { \
278  dst[n] += fir * src[n]; \
279  dst[n] = dst[n] * mix + imix * src[n]; \
280  } \
281  \
282  return 0; \
283 }
284 
285 PARALLEL_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
286 PARALLEL_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
287 PARALLEL_IIR_CH(fltp, float, -1., 1., 0)
288 PARALLEL_IIR_CH(dblp, double, -1., 1., 0)
289 
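/*
 * Lattice-ladder processing: k[] holds the reflection coefficients and v[]
 * the ladder (tap) weights.  Each stage computes
 *   n0 = n1 - k[i]*x[i],   p0 = n0*k[i] + x[i]
 * and the output is the sum of the tap-weighted lattice states.
 */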
290 #define LATTICE_IIR_CH(name, type, min, max, need_clipping) \
291 static int iir_ch_lattice_## name(AVFilterContext *ctx, void *arg, \
292  int ch, int nb_jobs) \
293 { \
294  AudioIIRContext *s = ctx->priv; \
295  const double ig = s->dry_gain; \
296  const double og = s->wet_gain; \
297  const double mix = s->mix; \
298  ThreadData *td = arg; \
299  AVFrame *in = td->in, *out = td->out; \
300  const type *src = (const type *)in->extended_data[ch]; \
301  double n0, n1, p0, *x = (double *)s->iir[ch].cache[0]; \
302  const int nb_stages = s->iir[ch].nb_ab[1]; \
303  const double *v = s->iir[ch].ab[0]; \
304  const double *k = s->iir[ch].ab[1]; \
305  const double g = s->iir[ch].g; \
306  int *clippings = &s->iir[ch].clippings; \
307  type *dst = (type *)out->extended_data[ch]; \
308  int n; \
309  \
310  for (n = 0; n < in->nb_samples; n++) { \
311  const double in = src[n] * ig; \
312  double out = 0.; \
313  \
314  n1 = in; \
315  for (int i = nb_stages - 1; i >= 0; i--) { \
316  n0 = n1 - k[i] * x[i]; \
317  p0 = n0 * k[i] + x[i]; \
318  out += p0 * v[i+1]; \
319  x[i] = p0; \
320  n1 = n0; \
321  } \
322  \
323  out += n1 * v[0]; \
324  memmove(&x[1], &x[0], nb_stages * sizeof(*x)); \
325  x[0] = n1; \
326  out *= og * g; \
327  out = out * mix + in * (1. - mix); \
328  if (need_clipping && out < min) { \
329  (*clippings)++; \
330  dst[n] = min; \
331  } else if (need_clipping && out > max) { \
332  (*clippings)++; \
333  dst[n] = max; \
334  } else { \
335  dst[n] = out; \
336  } \
337  } \
338  \
339  return 0; \
340 }
341 
342 LATTICE_IIR_CH(s16p, int16_t, INT16_MIN, INT16_MAX, 1)
343 LATTICE_IIR_CH(s32p, int32_t, INT32_MIN, INT32_MAX, 1)
344 LATTICE_IIR_CH(fltp, float, -1., 1., 0)
345 LATTICE_IIR_CH(dblp, double, -1., 1., 0)
346 
347 static void count_coefficients(char *item_str, int *nb_items)
348 {
349  char *p;
350 
351  if (!item_str)
352  return;
353 
354  *nb_items = 1;
355  for (p = item_str; *p && *p != '|'; p++) {
356  if (*p == ' ')
357  (*nb_items)++;
358  }
359 }
360 
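/*
 * Option-string parsing helpers: channels are separated by '|' and the
 * coefficients of one channel by spaces; when fewer channel entries are
 * given than there are channels, the last entry is reused.
 */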
361 static int read_gains(AVFilterContext *ctx, char *item_str, int nb_items)
362 {
363  AudioIIRContext *s = ctx->priv;
364  char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
365  int i;
366 
367  p = old_str = av_strdup(item_str);
368  if (!p)
369  return AVERROR(ENOMEM);
370  for (i = 0; i < nb_items; i++) {
371  if (!(arg = av_strtok(p, "|", &saveptr)))
372  arg = prev_arg;
373 
374  if (!arg) {
375  av_freep(&old_str);
376  return AVERROR(EINVAL);
377  }
378 
379  p = NULL;
380  if (av_sscanf(arg, "%lf", &s->iir[i].g) != 1) {
381  av_log(ctx, AV_LOG_ERROR, "Invalid gains supplied: %s\n", arg);
382  av_freep(&old_str);
383  return AVERROR(EINVAL);
384  }
385 
386  prev_arg = arg;
387  }
388 
389  av_freep(&old_str);
390 
391  return 0;
392 }
393 
394 static int read_tf_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst)
395 {
396  char *p, *arg, *old_str, *saveptr = NULL;
397  int i;
398 
399  p = old_str = av_strdup(item_str);
400  if (!p)
401  return AVERROR(ENOMEM);
402  for (i = 0; i < nb_items; i++) {
403  if (!(arg = av_strtok(p, " ", &saveptr)))
404  break;
405 
406  p = NULL;
407  if (av_sscanf(arg, "%lf", &dst[i]) != 1) {
408  av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
409  av_freep(&old_str);
410  return AVERROR(EINVAL);
411  }
412  }
413 
414  av_freep(&old_str);
415 
416  return 0;
417 }
418 
419 static int read_zp_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst, const char *format)
420 {
421  char *p, *arg, *old_str, *saveptr = NULL;
422  int i;
423 
424  p = old_str = av_strdup(item_str);
425  if (!p)
426  return AVERROR(ENOMEM);
427  for (i = 0; i < nb_items; i++) {
428  if (!(arg = av_strtok(p, " ", &saveptr)))
429  break;
430 
431  p = NULL;
432  if (av_sscanf(arg, format, &dst[i*2], &dst[i*2+1]) != 2) {
433  av_log(ctx, AV_LOG_ERROR, "Invalid coefficients supplied: %s\n", arg);
434  av_freep(&old_str);
435  return AVERROR(EINVAL);
436  }
437  }
438 
439  av_freep(&old_str);
440 
441  return 0;
442 }
443 
444 static const char *const format[] = { "%lf", "%lf %lfi", "%lf %lfr", "%lf %lfd", "%lf %lfi" };
445 
446 static int read_channels(AVFilterContext *ctx, int channels, uint8_t *item_str, int ab)
447 {
448  AudioIIRContext *s = ctx->priv;
449  char *p, *arg, *old_str, *prev_arg = NULL, *saveptr = NULL;
450  int i, ret;
451 
452  p = old_str = av_strdup(item_str);
453  if (!p)
454  return AVERROR(ENOMEM);
455  for (i = 0; i < channels; i++) {
456  IIRChannel *iir = &s->iir[i];
457 
458  if (!(arg = av_strtok(p, "|", &saveptr)))
459  arg = prev_arg;
460 
461  if (!arg) {
462  av_freep(&old_str);
463  return AVERROR(EINVAL);
464  }
465 
466  count_coefficients(arg, &iir->nb_ab[ab]);
467 
468  p = NULL;
469  iir->cache[ab] = av_calloc(iir->nb_ab[ab] + 1, sizeof(double));
470  iir->ab[ab] = av_calloc(iir->nb_ab[ab] * (!!s->format + 1), sizeof(double));
471  if (!iir->ab[ab] || !iir->cache[ab]) {
472  av_freep(&old_str);
473  return AVERROR(ENOMEM);
474  }
475 
476  if (s->format > 0) {
477  ret = read_zp_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab], format[s->format]);
478  } else {
479  ret = read_tf_coefficients(ctx, arg, iir->nb_ab[ab], iir->ab[ab]);
480  }
481  if (ret < 0) {
482  av_freep(&old_str);
483  return ret;
484  }
485  prev_arg = arg;
486  }
487 
488  av_freep(&old_str);
489 
490  return 0;
491 }
492 
493 static void cmul(double re, double im, double re2, double im2, double *RE, double *IM)
494 {
495  *RE = re * re2 - im * im2;
496  *IM = re * im2 + re2 * im;
497 }
498 
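/*
 * expand(): multiply out the factored form prod_i (z - r_i) from the n
 * complex roots stored in pz[] (re/im interleaved) into polynomial
 * coefficients in coefs[] (also re/im interleaved, constant term first).
 * The imaginary parts must vanish, i.e. the roots have to come in complex
 * conjugate pairs.
 */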
499 static int expand(AVFilterContext *ctx, double *pz, int n, double *coefs)
500 {
501  coefs[2 * n] = 1.0;
502 
503  for (int i = 1; i <= n; i++) {
504  for (int j = n - i; j < n; j++) {
505  double re, im;
506 
507  cmul(coefs[2 * (j + 1)], coefs[2 * (j + 1) + 1],
508  pz[2 * (i - 1)], pz[2 * (i - 1) + 1], &re, &im);
509 
510  coefs[2 * j] -= re;
511  coefs[2 * j + 1] -= im;
512  }
513  }
514 
515  for (int i = 0; i < n + 1; i++) {
516  if (fabs(coefs[2 * i + 1]) > FLT_EPSILON) {
517  av_log(ctx, AV_LOG_ERROR, "coefs: %f of z^%d is not real; poles/zeros are not complex conjugates.\n",
518  coefs[2 * i + 1], i);
519  return AVERROR(EINVAL);
520  }
521  }
522 
523  return 0;
524 }
525 
526 static void normalize_coeffs(AVFilterContext *ctx, int ch)
527 {
528  AudioIIRContext *s = ctx->priv;
529  IIRChannel *iir = &s->iir[ch];
530  double sum_den = 0.;
531 
532  if (!s->normalize)
533  return;
534 
535  for (int i = 0; i < iir->nb_ab[1]; i++) {
536  sum_den += iir->ab[1][i];
537  }
538 
539  if (sum_den > 1e-6) {
540  double factor, sum_num = 0.;
541 
542  for (int i = 0; i < iir->nb_ab[0]; i++) {
543  sum_num += iir->ab[0][i];
544  }
545 
546  factor = sum_num / sum_den;
547 
548  for (int i = 0; i < iir->nb_ab[1]; i++) {
549  iir->ab[1][i] *= factor;
550  }
551  }
552 }
553 
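/*
 * convert_zp2tf(): expand the zero/pole representation of each channel into
 * plain transfer-function coefficients, then renormalize them.
 */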
554 static int convert_zp2tf(AVFilterContext *ctx, int channels)
555 {
556  AudioIIRContext *s = ctx->priv;
557  int ch, i, j, ret = 0;
558 
559  for (ch = 0; ch < channels; ch++) {
560  IIRChannel *iir = &s->iir[ch];
561  double *topc, *botc;
562 
563  topc = av_calloc((iir->nb_ab[1] + 1) * 2, sizeof(*topc));
564  botc = av_calloc((iir->nb_ab[0] + 1) * 2, sizeof(*botc));
565  if (!topc || !botc) {
566  ret = AVERROR(ENOMEM);
567  goto fail;
568  }
569 
570  ret = expand(ctx, iir->ab[0], iir->nb_ab[0], botc);
571  if (ret < 0) {
572  goto fail;
573  }
574 
575  ret = expand(ctx, iir->ab[1], iir->nb_ab[1], topc);
576  if (ret < 0) {
577  goto fail;
578  }
579 
580  for (j = 0, i = iir->nb_ab[1]; i >= 0; j++, i--) {
581  iir->ab[1][j] = topc[2 * i];
582  }
583  iir->nb_ab[1]++;
584 
585  for (j = 0, i = iir->nb_ab[0]; i >= 0; j++, i--) {
586  iir->ab[0][j] = botc[2 * i];
587  }
588  iir->nb_ab[0]++;
589 
590  normalize_coeffs(ctx, ch);
591 
592 fail:
593  av_free(topc);
594  av_free(botc);
595  if (ret < 0)
596  break;
597  }
598 
599  return ret;
600 }
601 
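/*
 * decompose_zp2biquads(): split the zeros/poles into second-order sections.
 * Each iteration takes the remaining pole pair with the largest magnitude,
 * pairs it with the nearest zero pair, expands both pairs into quadratics
 * and stores normalized biquad coefficients; consumed roots are marked NAN.
 * The overall gain is folded into the first section only.
 */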
602 static int decompose_zp2biquads(AVFilterContext *ctx, int channels)
603 {
604  AudioIIRContext *s = ctx->priv;
605  int ch, ret;
606 
607  for (ch = 0; ch < channels; ch++) {
608  IIRChannel *iir = &s->iir[ch];
609  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2;
610  int current_biquad = 0;
611 
612  iir->biquads = av_calloc(nb_biquads, sizeof(BiquadContext));
613  if (!iir->biquads)
614  return AVERROR(ENOMEM);
615 
616  while (nb_biquads--) {
617  Pair outmost_pole = { -1, -1 };
618  Pair nearest_zero = { -1, -1 };
619  double zeros[4] = { 0 };
620  double poles[4] = { 0 };
621  double b[6] = { 0 };
622  double a[6] = { 0 };
623  double min_distance = DBL_MAX;
624  double max_mag = 0;
625  double factor;
626  int i;
627 
628  for (i = 0; i < iir->nb_ab[0]; i++) {
629  double mag;
630 
631  if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
632  continue;
633  mag = hypot(iir->ab[0][2 * i], iir->ab[0][2 * i + 1]);
634 
635  if (mag > max_mag) {
636  max_mag = mag;
637  outmost_pole.a = i;
638  }
639  }
640 
641  for (i = 0; i < iir->nb_ab[0]; i++) {
642  if (isnan(iir->ab[0][2 * i]) || isnan(iir->ab[0][2 * i + 1]))
643  continue;
644 
645  if (iir->ab[0][2 * i ] == iir->ab[0][2 * outmost_pole.a ] &&
646  iir->ab[0][2 * i + 1] == -iir->ab[0][2 * outmost_pole.a + 1]) {
647  outmost_pole.b = i;
648  break;
649  }
650  }
651 
652  av_log(ctx, AV_LOG_VERBOSE, "outmost_pole is %d.%d\n", outmost_pole.a, outmost_pole.b);
653 
654  if (outmost_pole.a < 0 || outmost_pole.b < 0)
655  return AVERROR(EINVAL);
656 
657  for (i = 0; i < iir->nb_ab[1]; i++) {
658  double distance;
659 
660  if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
661  continue;
662  distance = hypot(iir->ab[0][2 * outmost_pole.a ] - iir->ab[1][2 * i ],
663  iir->ab[0][2 * outmost_pole.a + 1] - iir->ab[1][2 * i + 1]);
664 
665  if (distance < min_distance) {
666  min_distance = distance;
667  nearest_zero.a = i;
668  }
669  }
670 
671  for (i = 0; i < iir->nb_ab[1]; i++) {
672  if (isnan(iir->ab[1][2 * i]) || isnan(iir->ab[1][2 * i + 1]))
673  continue;
674 
675  if (iir->ab[1][2 * i ] == iir->ab[1][2 * nearest_zero.a ] &&
676  iir->ab[1][2 * i + 1] == -iir->ab[1][2 * nearest_zero.a + 1]) {
677  nearest_zero.b = i;
678  break;
679  }
680  }
681 
682  av_log(ctx, AV_LOG_VERBOSE, "nearest_zero is %d.%d\n", nearest_zero.a, nearest_zero.b);
683 
684  if (nearest_zero.a < 0 || nearest_zero.b < 0)
685  return AVERROR(EINVAL);
686 
687  poles[0] = iir->ab[0][2 * outmost_pole.a ];
688  poles[1] = iir->ab[0][2 * outmost_pole.a + 1];
689 
690  zeros[0] = iir->ab[1][2 * nearest_zero.a ];
691  zeros[1] = iir->ab[1][2 * nearest_zero.a + 1];
692 
693  if (nearest_zero.a == nearest_zero.b && outmost_pole.a == outmost_pole.b) {
694  zeros[2] = 0;
695  zeros[3] = 0;
696 
697  poles[2] = 0;
698  poles[3] = 0;
699  } else {
700  poles[2] = iir->ab[0][2 * outmost_pole.b ];
701  poles[3] = iir->ab[0][2 * outmost_pole.b + 1];
702 
703  zeros[2] = iir->ab[1][2 * nearest_zero.b ];
704  zeros[3] = iir->ab[1][2 * nearest_zero.b + 1];
705  }
706 
707  ret = expand(ctx, zeros, 2, b);
708  if (ret < 0)
709  return ret;
710 
711  ret = expand(ctx, poles, 2, a);
712  if (ret < 0)
713  return ret;
714 
715  iir->ab[0][2 * outmost_pole.a] = iir->ab[0][2 * outmost_pole.a + 1] = NAN;
716  iir->ab[0][2 * outmost_pole.b] = iir->ab[0][2 * outmost_pole.b + 1] = NAN;
717  iir->ab[1][2 * nearest_zero.a] = iir->ab[1][2 * nearest_zero.a + 1] = NAN;
718  iir->ab[1][2 * nearest_zero.b] = iir->ab[1][2 * nearest_zero.b + 1] = NAN;
719 
720  iir->biquads[current_biquad].a[0] = 1.;
721  iir->biquads[current_biquad].a[1] = a[2] / a[4];
722  iir->biquads[current_biquad].a[2] = a[0] / a[4];
723  iir->biquads[current_biquad].b[0] = b[4] / a[4];
724  iir->biquads[current_biquad].b[1] = b[2] / a[4];
725  iir->biquads[current_biquad].b[2] = b[0] / a[4];
726 
727  if (s->normalize &&
728  fabs(iir->biquads[current_biquad].b[0] +
729  iir->biquads[current_biquad].b[1] +
730  iir->biquads[current_biquad].b[2]) > 1e-6) {
731  factor = (iir->biquads[current_biquad].a[0] +
732  iir->biquads[current_biquad].a[1] +
733  iir->biquads[current_biquad].a[2]) /
734  (iir->biquads[current_biquad].b[0] +
735  iir->biquads[current_biquad].b[1] +
736  iir->biquads[current_biquad].b[2]);
737 
738  av_log(ctx, AV_LOG_VERBOSE, "factor=%f\n", factor);
739 
740  iir->biquads[current_biquad].b[0] *= factor;
741  iir->biquads[current_biquad].b[1] *= factor;
742  iir->biquads[current_biquad].b[2] *= factor;
743  }
744 
745  iir->biquads[current_biquad].b[0] *= (current_biquad ? 1.0 : iir->g);
746  iir->biquads[current_biquad].b[1] *= (current_biquad ? 1.0 : iir->g);
747  iir->biquads[current_biquad].b[2] *= (current_biquad ? 1.0 : iir->g);
748 
749  av_log(ctx, AV_LOG_VERBOSE, "a=%f %f %f:b=%f %f %f\n",
750  iir->biquads[current_biquad].a[0],
751  iir->biquads[current_biquad].a[1],
752  iir->biquads[current_biquad].a[2],
753  iir->biquads[current_biquad].b[0],
754  iir->biquads[current_biquad].b[1],
755  iir->biquads[current_biquad].b[2]);
756 
757  current_biquad++;
758  }
759  }
760 
761  return 0;
762 }
763 
764 static void biquad_process(double *x, double *y, int length,
765  double b0, double b1, double b2,
766  double a1, double a2)
767 {
768  double w1 = 0., w2 = 0.;
769 
770  a1 = -a1;
771  a2 = -a2;
772 
773  for (int n = 0; n < length; n++) {
774  double out, in = x[n];
775 
776  y[n] = out = in * b0 + w1;
777  w1 = b1 * in + w2 + a1 * out;
778  w2 = b2 * in + a2 * out;
779  }
780 }
781 
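/*
 * solve(): solve an n x n linear system via LU decomposition followed by
 * forward and backward substitution; lu[] and y[] are scratch buffers.
 */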
782 static void solve(double *matrix, double *vector, int n, double *y, double *x, double *lu)
783 {
784  double sum = 0.;
785 
786  for (int i = 0; i < n; i++) {
787  for (int j = i; j < n; j++) {
788  sum = 0.;
789  for (int k = 0; k < i; k++)
790  sum += lu[i * n + k] * lu[k * n + j];
791  lu[i * n + j] = matrix[j * n + i] - sum;
792  }
793  for (int j = i + 1; j < n; j++) {
794  sum = 0.;
795  for (int k = 0; k < i; k++)
796  sum += lu[j * n + k] * lu[k * n + i];
797  lu[j * n + i] = (1. / lu[i * n + i]) * (matrix[i * n + j] - sum);
798  }
799  }
800 
801  for (int i = 0; i < n; i++) {
802  sum = 0.;
803  for (int k = 0; k < i; k++)
804  sum += lu[i * n + k] * y[k];
805  y[i] = vector[i] - sum;
806  }
807 
808  for (int i = n - 1; i >= 0; i--) {
809  sum = 0.;
810  for (int k = i + 1; k < n; k++)
811  sum += lu[i * n + k] * x[k];
812  x[i] = (1 / lu[i * n + i]) * (y[i] - sum);
813  }
814 }
815 
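/*
 * convert_serial2parallel(): turn the biquad cascade into a parallel
 * (partial-fraction) form.  The impulse response of the cascade is matched
 * against the all-pole responses of the individual sections by solving a
 * linear system; each section then gets a two-tap numerator (b0 = 0) and
 * the direct path ends up in iir->fir.
 */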
816 static int convert_serial2parallel(AVFilterContext *ctx, int channels)
817 {
818  AudioIIRContext *s = ctx->priv;
819 
820  for (int ch = 0; ch < channels; ch++) {
821  IIRChannel *iir = &s->iir[ch];
822  int nb_biquads = (FFMAX(iir->nb_ab[0], iir->nb_ab[1]) + 1) / 2;
823  int length = nb_biquads * 2 + 1;
824  double *impulse = av_calloc(length, sizeof(*impulse));
825  double *y = av_calloc(length, sizeof(*y));
826  double *resp = av_calloc(length, sizeof(*resp));
827  double *M = av_calloc((length - 1) * nb_biquads, 2 * 2 * sizeof(*M));
828  double *W;
829 
830  if (!impulse || !y || !resp || !M) {
831  av_free(impulse);
832  av_free(y);
833  av_free(resp);
834  av_free(M);
835  return AVERROR(ENOMEM);
836  }
837  W = M + (length - 1) * 2 * nb_biquads;
838 
839  impulse[0] = 1.;
840 
841  for (int n = 0; n < nb_biquads; n++) {
842  BiquadContext *biquad = &iir->biquads[n];
843 
844  biquad_process(n ? y : impulse, y, length,
845  biquad->b[0], biquad->b[1], biquad->b[2],
846  biquad->a[1], biquad->a[2]);
847  }
848 
849  for (int n = 0; n < nb_biquads; n++) {
850  BiquadContext *biquad = &iir->biquads[n];
851 
852  biquad_process(impulse, resp, length - 1,
853  1., 0., 0., biquad->a[1], biquad->a[2]);
854 
855  memcpy(M + n * 2 * (length - 1), resp, sizeof(*resp) * (length - 1));
856  memcpy(M + n * 2 * (length - 1) + length, resp, sizeof(*resp) * (length - 2));
857  memset(resp, 0, length * sizeof(*resp));
858  }
859 
860  solve(M, &y[1], length - 1, &impulse[1], resp, W);
861 
862  iir->fir = y[0];
863 
864  for (int n = 0; n < nb_biquads; n++) {
865  BiquadContext *biquad = &iir->biquads[n];
866 
867  biquad->b[0] = 0.;
868  biquad->b[1] = resp[n * 2 + 0];
869  biquad->b[2] = resp[n * 2 + 1];
870  }
871 
872  av_free(impulse);
873  av_free(y);
874  av_free(resp);
875  av_free(M);
876  }
877 
878  return 0;
879 }
880 
881 static void convert_pr2zp(AVFilterContext *ctx, int channels)
882 {
883  AudioIIRContext *s = ctx->priv;
884  int ch;
885 
886  for (ch = 0; ch < channels; ch++) {
887  IIRChannel *iir = &s->iir[ch];
888  int n;
889 
890  for (n = 0; n < iir->nb_ab[0]; n++) {
891  double r = iir->ab[0][2*n];
892  double angle = iir->ab[0][2*n+1];
893 
894  iir->ab[0][2*n] = r * cos(angle);
895  iir->ab[0][2*n+1] = r * sin(angle);
896  }
897 
898  for (n = 0; n < iir->nb_ab[1]; n++) {
899  double r = iir->ab[1][2*n];
900  double angle = iir->ab[1][2*n+1];
901 
902  iir->ab[1][2*n] = r * cos(angle);
903  iir->ab[1][2*n+1] = r * sin(angle);
904  }
905  }
906 }
907 
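/* convert_sp2zp(): map S-plane roots to the Z-plane via z = exp(s)
 * (matched Z-transform). */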
908 static void convert_sp2zp(AVFilterContext *ctx, int channels)
909 {
910  AudioIIRContext *s = ctx->priv;
911  int ch;
912 
913  for (ch = 0; ch < channels; ch++) {
914  IIRChannel *iir = &s->iir[ch];
915  int n;
916 
917  for (n = 0; n < iir->nb_ab[0]; n++) {
918  double sr = iir->ab[0][2*n];
919  double si = iir->ab[0][2*n+1];
920 
921  iir->ab[0][2*n] = exp(sr) * cos(si);
922  iir->ab[0][2*n+1] = exp(sr) * sin(si);
923  }
924 
925  for (n = 0; n < iir->nb_ab[1]; n++) {
926  double sr = iir->ab[1][2*n];
927  double si = iir->ab[1][2*n+1];
928 
929  iir->ab[1][2*n] = exp(sr) * cos(si);
930  iir->ab[1][2*n+1] = exp(sr) * sin(si);
931  }
932  }
933 }
934 
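/*
 * fact()/coef_sf2zf(): helpers for convert_sf2tf() below.  coef_sf2zf()
 * computes one digital coefficient from the analog (S-domain) polynomial
 * through a binomial expansion; the pow(2., i) factor stems from what is
 * effectively a bilinear-transform substitution of s.
 */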
935 static double fact(double i)
936 {
937  if (i <= 0.)
938  return 1.;
939  return i * fact(i - 1.);
940 }
941 
942 static double coef_sf2zf(double *a, int N, int n)
943 {
944  double z = 0.;
945 
946  for (int i = 0; i <= N; i++) {
947  double acc = 0.;
948 
949  for (int k = FFMAX(n - N + i, 0); k <= FFMIN(i, n); k++) {
950  acc += ((fact(i) * fact(N - i)) /
951  (fact(k) * fact(i - k) * fact(n - k) * fact(N - i - n + k))) *
952  ((k & 1) ? -1. : 1.);
953  }
954 
955  z += a[i] * pow(2., i) * acc;
956  }
957 
958  return z;
959 }
960 
961 static void convert_sf2tf(AVFilterContext *ctx, int channels)
962 {
963  AudioIIRContext *s = ctx->priv;
964  int ch;
965 
966  for (ch = 0; ch < channels; ch++) {
967  IIRChannel *iir = &s->iir[ch];
968  double *temp0 = av_calloc(iir->nb_ab[0], sizeof(*temp0));
969  double *temp1 = av_calloc(iir->nb_ab[1], sizeof(*temp1));
970 
971  if (!temp0 || !temp1)
972  goto next;
973 
974  memcpy(temp0, iir->ab[0], iir->nb_ab[0] * sizeof(*temp0));
975  memcpy(temp1, iir->ab[1], iir->nb_ab[1] * sizeof(*temp1));
976 
977  for (int n = 0; n < iir->nb_ab[0]; n++)
978  iir->ab[0][n] = coef_sf2zf(temp0, iir->nb_ab[0] - 1, n);
979 
980  for (int n = 0; n < iir->nb_ab[1]; n++)
981  iir->ab[1][n] = coef_sf2zf(temp1, iir->nb_ab[1] - 1, n);
982 
983 next:
984  av_free(temp0);
985  av_free(temp1);
986  }
987 }
988 
989 static void convert_pd2zp(AVFilterContext *ctx, int channels)
990 {
991  AudioIIRContext *s = ctx->priv;
992  int ch;
993 
994  for (ch = 0; ch < channels; ch++) {
995  IIRChannel *iir = &s->iir[ch];
996  int n;
997 
998  for (n = 0; n < iir->nb_ab[0]; n++) {
999  double r = iir->ab[0][2*n];
1000  double angle = M_PI*iir->ab[0][2*n+1]/180.;
1001 
1002  iir->ab[0][2*n] = r * cos(angle);
1003  iir->ab[0][2*n+1] = r * sin(angle);
1004  }
1005 
1006  for (n = 0; n < iir->nb_ab[1]; n++) {
1007  double r = iir->ab[1][2*n];
1008  double angle = M_PI*iir->ab[1][2*n+1]/180.;
1009 
1010  iir->ab[1][2*n] = r * cos(angle);
1011  iir->ab[1][2*n+1] = r * sin(angle);
1012  }
1013  }
1014 }
1015 
1016 static void check_stability(AVFilterContext *ctx, int channels)
1017 {
1018  AudioIIRContext *s = ctx->priv;
1019  int ch;
1020 
1021  for (ch = 0; ch < channels; ch++) {
1022  IIRChannel *iir = &s->iir[ch];
1023 
1024  for (int n = 0; n < iir->nb_ab[0]; n++) {
1025  double pr = hypot(iir->ab[0][2*n], iir->ab[0][2*n+1]);
1026 
1027  if (pr >= 1.) {
1028  av_log(ctx, AV_LOG_WARNING, "pole %d at channel %d is unstable\n", n, ch);
1029  break;
1030  }
1031  }
1032  }
1033 }
1034 
1035 static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
1036 {
1037  const uint8_t *font;
1038  int font_height;
1039  int i;
1040 
1041  font = avpriv_cga_font, font_height = 8;
1042 
1043  for (i = 0; txt[i]; i++) {
1044  int char_y, mask;
1045 
1046  uint8_t *p = pic->data[0] + y * pic->linesize[0] + (x + i * 8) * 4;
1047  for (char_y = 0; char_y < font_height; char_y++) {
1048  for (mask = 0x80; mask; mask >>= 1) {
1049  if (font[txt[i] * font_height + char_y] & mask)
1050  AV_WL32(p, color);
1051  p += 4;
1052  }
1053  p += pic->linesize[0] - 8 * 4;
1054  }
1055  }
1056 }
1057 
1058 static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
1059 {
1060  int dx = FFABS(x1-x0);
1061  int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
1062  int err = (dx>dy ? dx : -dy) / 2, e2;
1063 
1064  for (;;) {
1065  AV_WL32(out->data[0] + y0 * out->linesize[0] + x0 * 4, color);
1066 
1067  if (x0 == x1 && y0 == y1)
1068  break;
1069 
1070  e2 = err;
1071 
1072  if (e2 >-dx) {
1073  err -= dy;
1074  x0--;
1075  }
1076 
1077  if (e2 < dy) {
1078  err += dx;
1079  y0 += sy;
1080  }
1081  }
1082 }
1083 
1084 static double distance(double x0, double x1, double y0, double y1)
1085 {
1086  return hypot(x0 - x1, y0 - y1);
1087 }
1088 
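/*
 * get_response(): evaluate magnitude and phase at angular frequency w.
 * For transfer-function coefficients the numerator and denominator are
 * evaluated on the unit circle and divided; for zero/pole formats the
 * magnitude is the product of distances to the zeros over the product of
 * distances to the poles, and the phase is the accumulated angles.
 */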
1089 static void get_response(int channel, int format, double w,
1090  const double *b, const double *a,
1091  int nb_b, int nb_a, double *magnitude, double *phase)
1092 {
1093  double realz, realp;
1094  double imagz, imagp;
1095  double real, imag;
1096  double div;
1097 
1098  if (format == 0) {
1099  realz = 0., realp = 0.;
1100  imagz = 0., imagp = 0.;
1101  for (int x = 0; x < nb_a; x++) {
1102  realz += cos(-x * w) * a[x];
1103  imagz += sin(-x * w) * a[x];
1104  }
1105 
1106  for (int x = 0; x < nb_b; x++) {
1107  realp += cos(-x * w) * b[x];
1108  imagp += sin(-x * w) * b[x];
1109  }
1110 
1111  div = realp * realp + imagp * imagp;
1112  real = (realz * realp + imagz * imagp) / div;
1113  imag = (imagz * realp - imagp * realz) / div;
1114 
1115  *magnitude = hypot(real, imag);
1116  *phase = atan2(imag, real);
1117  } else {
1118  double p = 1., z = 1.;
1119  double acc = 0.;
1120 
1121  for (int x = 0; x < nb_a; x++) {
1122  z *= distance(cos(w), a[2 * x], sin(w), a[2 * x + 1]);
1123  acc += atan2(sin(w) - a[2 * x + 1], cos(w) - a[2 * x]);
1124  }
1125 
1126  for (int x = 0; x < nb_b; x++) {
1127  p *= distance(cos(w), b[2 * x], sin(w), b[2 * x + 1]);
1128  acc -= atan2(sin(w) - b[2 * x + 1], cos(w) - b[2 * x]);
1129  }
1130 
1131  *magnitude = z / p;
1132  *phase = acc;
1133  }
1134 }
1135 
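/*
 * draw_response(): render the magnitude, unwrapped phase and group-delay
 * curves of the selected channel into the video frame, and print min/max
 * figures when the canvas is large enough.
 */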
1136 static void draw_response(AVFilterContext *ctx, AVFrame *out, int sample_rate)
1137 {
1138  AudioIIRContext *s = ctx->priv;
1139  double *mag, *phase, *temp, *delay, min = DBL_MAX, max = -DBL_MAX;
1140  double min_delay = DBL_MAX, max_delay = -DBL_MAX, min_phase, max_phase;
1141  int prev_ymag = -1, prev_yphase = -1, prev_ydelay = -1;
1142  char text[32];
1143  int ch, i;
1144 
1145  memset(out->data[0], 0, s->h * out->linesize[0]);
1146 
1147  phase = av_malloc_array(s->w, sizeof(*phase));
1148  temp = av_malloc_array(s->w, sizeof(*temp));
1149  mag = av_malloc_array(s->w, sizeof(*mag));
1150  delay = av_malloc_array(s->w, sizeof(*delay));
1151  if (!mag || !phase || !delay || !temp)
1152  goto end;
1153 
1154  ch = av_clip(s->ir_channel, 0, s->channels - 1);
1155  for (i = 0; i < s->w; i++) {
1156  const double *b = s->iir[ch].ab[0];
1157  const double *a = s->iir[ch].ab[1];
1158  const int nb_b = s->iir[ch].nb_ab[0];
1159  const int nb_a = s->iir[ch].nb_ab[1];
1160  double w = i * M_PI / (s->w - 1);
1161  double m, p;
1162 
1163  get_response(ch, s->format, w, b, a, nb_b, nb_a, &m, &p);
1164 
1165  mag[i] = s->iir[ch].g * m;
1166  phase[i] = p;
1167  min = fmin(min, mag[i]);
1168  max = fmax(max, mag[i]);
1169  }
1170 
1171  temp[0] = 0.;
1172  for (i = 0; i < s->w - 1; i++) {
1173  double d = phase[i] - phase[i + 1];
1174  temp[i + 1] = ceil(fabs(d) / (2. * M_PI)) * 2. * M_PI * ((d > M_PI) - (d < -M_PI));
1175  }
1176 
1177  min_phase = phase[0];
1178  max_phase = phase[0];
1179  for (i = 1; i < s->w; i++) {
1180  temp[i] += temp[i - 1];
1181  phase[i] += temp[i];
1182  min_phase = fmin(min_phase, phase[i]);
1183  max_phase = fmax(max_phase, phase[i]);
1184  }
1185 
1186  for (i = 0; i < s->w - 1; i++) {
1187  double div = s->w / (double)sample_rate;
1188 
1189  delay[i + 1] = -(phase[i] - phase[i + 1]) / div;
1190  min_delay = fmin(min_delay, delay[i + 1]);
1191  max_delay = fmax(max_delay, delay[i + 1]);
1192  }
1193  delay[0] = delay[1];
1194 
1195  for (i = 0; i < s->w; i++) {
1196  int ymag = mag[i] / max * (s->h - 1);
1197  int ydelay = (delay[i] - min_delay) / (max_delay - min_delay) * (s->h - 1);
1198  int yphase = (phase[i] - min_phase) / (max_phase - min_phase) * (s->h - 1);
1199 
1200  ymag = s->h - 1 - av_clip(ymag, 0, s->h - 1);
1201  yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1);
1202  ydelay = s->h - 1 - av_clip(ydelay, 0, s->h - 1);
1203 
1204  if (prev_ymag < 0)
1205  prev_ymag = ymag;
1206  if (prev_yphase < 0)
1207  prev_yphase = yphase;
1208  if (prev_ydelay < 0)
1209  prev_ydelay = ydelay;
1210 
1211  draw_line(out, i, ymag, FFMAX(i - 1, 0), prev_ymag, 0xFFFF00FF);
1212  draw_line(out, i, yphase, FFMAX(i - 1, 0), prev_yphase, 0xFF00FF00);
1213  draw_line(out, i, ydelay, FFMAX(i - 1, 0), prev_ydelay, 0xFF00FFFF);
1214 
1215  prev_ymag = ymag;
1216  prev_yphase = yphase;
1217  prev_ydelay = ydelay;
1218  }
1219 
1220  if (s->w > 400 && s->h > 100) {
1221  drawtext(out, 2, 2, "Max Magnitude:", 0xDDDDDDDD);
1222  snprintf(text, sizeof(text), "%.2f", max);
1223  drawtext(out, 15 * 8 + 2, 2, text, 0xDDDDDDDD);
1224 
1225  drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD);
1226  snprintf(text, sizeof(text), "%.2f", min);
1227  drawtext(out, 15 * 8 + 2, 12, text, 0xDDDDDDDD);
1228 
1229  drawtext(out, 2, 22, "Max Phase:", 0xDDDDDDDD);
1230  snprintf(text, sizeof(text), "%.2f", max_phase);
1231  drawtext(out, 15 * 8 + 2, 22, text, 0xDDDDDDDD);
1232 
1233  drawtext(out, 2, 32, "Min Phase:", 0xDDDDDDDD);
1234  snprintf(text, sizeof(text), "%.2f", min_phase);
1235  drawtext(out, 15 * 8 + 2, 32, text, 0xDDDDDDDD);
1236 
1237  drawtext(out, 2, 42, "Max Delay:", 0xDDDDDDDD);
1238  snprintf(text, sizeof(text), "%.2f", max_delay);
1239  drawtext(out, 11 * 8 + 2, 42, text, 0xDDDDDDDD);
1240 
1241  drawtext(out, 2, 52, "Min Delay:", 0xDDDDDDDD);
1242  snprintf(text, sizeof(text), "%.2f", min_delay);
1243  drawtext(out, 11 * 8 + 2, 52, text, 0xDDDDDDDD);
1244  }
1245 
1246 end:
1247  av_free(delay);
1248  av_free(temp);
1249  av_free(phase);
1250  av_free(mag);
1251 }
1252 
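/*
 * config_output(): parse gains and coefficients, convert the selected input
 * format to the internal representation, optionally decompose zeros/poles
 * into biquads for serial/parallel processing, draw the response video if
 * requested and pick the per-channel processing function matching the
 * negotiated sample format.
 */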
1253 static int config_output(AVFilterLink *outlink)
1254 {
1255  AVFilterContext *ctx = outlink->src;
1256  AudioIIRContext *s = ctx->priv;
1257  AVFilterLink *inlink = ctx->inputs[0];
1258  int ch, ret, i;
1259 
1260  s->channels = inlink->ch_layout.nb_channels;
1261  s->iir = av_calloc(s->channels, sizeof(*s->iir));
1262  if (!s->iir)
1263  return AVERROR(ENOMEM);
1264 
1265  ret = read_gains(ctx, s->g_str, inlink->ch_layout.nb_channels);
1266  if (ret < 0)
1267  return ret;
1268 
1269  ret = read_channels(ctx, inlink->ch_layout.nb_channels, s->a_str, 0);
1270  if (ret < 0)
1271  return ret;
1272 
1273  ret = read_channels(ctx, inlink->ch_layout.nb_channels, s->b_str, 1);
1274  if (ret < 0)
1275  return ret;
1276 
1277  if (s->format == -1) {
1278  convert_sf2tf(ctx, inlink->ch_layout.nb_channels);
1279  s->format = 0;
1280  } else if (s->format == 2) {
1281  convert_pr2zp(ctx, inlink->ch_layout.nb_channels);
1282  } else if (s->format == 3) {
1283  convert_pd2zp(ctx, inlink->ch_layout.nb_channels);
1284  } else if (s->format == 4) {
1285  convert_sp2zp(ctx, inlink->ch_layout.nb_channels);
1286  }
1287  if (s->format > 0) {
1288  check_stability(ctx, inlink->ch_layout.nb_channels);
1289  }
1290 
1291  av_frame_free(&s->video);
1292  if (s->response) {
1293  s->video = ff_get_video_buffer(ctx->outputs[1], s->w, s->h);
1294  if (!s->video)
1295  return AVERROR(ENOMEM);
1296 
1297  draw_response(ctx, s->video, inlink->sample_rate);
1298  }
1299 
1300  if (s->format == 0)
1301  av_log(ctx, AV_LOG_WARNING, "transfer function coefficients format is not recommended for too high number of zeros/poles.\n");
1302 
1303  if (s->format > 0 && s->process == 0) {
1304  av_log(ctx, AV_LOG_WARNING, "Direct processing is not recommended for zp coefficients format.\n");
1305 
1306  ret = convert_zp2tf(ctx, inlink->ch_layout.nb_channels);
1307  if (ret < 0)
1308  return ret;
1309  } else if (s->format == -2 && s->process > 0) {
1310  av_log(ctx, AV_LOG_ERROR, "Only direct processing is implemented for lattice-ladder function.\n");
1311  return AVERROR_PATCHWELCOME;
1312  } else if (s->format <= 0 && s->process == 1) {
1313  av_log(ctx, AV_LOG_ERROR, "Serial processing is not implemented for transfer function.\n");
1314  return AVERROR_PATCHWELCOME;
1315  } else if (s->format <= 0 && s->process == 2) {
1316  av_log(ctx, AV_LOG_ERROR, "Parallel processing is not implemented for transfer function.\n");
1317  return AVERROR_PATCHWELCOME;
1318  } else if (s->format > 0 && s->process == 1) {
1319  ret = decompose_zp2biquads(ctx, inlink->ch_layout.nb_channels);
1320  if (ret < 0)
1321  return ret;
1322  } else if (s->format > 0 && s->process == 2) {
1323  if (s->precision > 1)
1324  av_log(ctx, AV_LOG_WARNING, "Parallel processing is not recommended for fixed-point precisions.\n");
1325  ret = decompose_zp2biquads(ctx, inlink->ch_layout.nb_channels);
1326  if (ret < 0)
1327  return ret;
1328  ret = convert_serial2parallel(ctx, inlink->ch_layout.nb_channels);
1329  if (ret < 0)
1330  return ret;
1331  }
1332 
1333  for (ch = 0; s->format == -2 && ch < inlink->ch_layout.nb_channels; ch++) {
1334  IIRChannel *iir = &s->iir[ch];
1335 
1336  if (iir->nb_ab[0] != iir->nb_ab[1] + 1) {
1337  av_log(ctx, AV_LOG_ERROR, "Number of ladder coefficients must be one more than number of reflection coefficients.\n");
1338  return AVERROR(EINVAL);
1339  }
1340  }
1341 
1342  for (ch = 0; s->format == 0 && ch < inlink->ch_layout.nb_channels; ch++) {
1343  IIRChannel *iir = &s->iir[ch];
1344 
1345  for (i = 1; i < iir->nb_ab[0]; i++) {
1346  iir->ab[0][i] /= iir->ab[0][0];
1347  }
1348 
1349  iir->ab[0][0] = 1.0;
1350  for (i = 0; i < iir->nb_ab[1]; i++) {
1351  iir->ab[1][i] *= iir->g;
1352  }
1353 
1354  normalize_coeffs(ctx, ch);
1355  }
1356 
1357  switch (inlink->format) {
1358  case AV_SAMPLE_FMT_DBLP: s->iir_channel = s->process == 2 ? iir_ch_parallel_dblp : s->process == 1 ? iir_ch_serial_dblp : iir_ch_dblp; break;
1359  case AV_SAMPLE_FMT_FLTP: s->iir_channel = s->process == 2 ? iir_ch_parallel_fltp : s->process == 1 ? iir_ch_serial_fltp : iir_ch_fltp; break;
1360  case AV_SAMPLE_FMT_S32P: s->iir_channel = s->process == 2 ? iir_ch_parallel_s32p : s->process == 1 ? iir_ch_serial_s32p : iir_ch_s32p; break;
1361  case AV_SAMPLE_FMT_S16P: s->iir_channel = s->process == 2 ? iir_ch_parallel_s16p : s->process == 1 ? iir_ch_serial_s16p : iir_ch_s16p; break;
1362  }
1363 
1364  if (s->format == -2) {
1365  switch (inlink->format) {
1366  case AV_SAMPLE_FMT_DBLP: s->iir_channel = iir_ch_lattice_dblp; break;
1367  case AV_SAMPLE_FMT_FLTP: s->iir_channel = iir_ch_lattice_fltp; break;
1368  case AV_SAMPLE_FMT_S32P: s->iir_channel = iir_ch_lattice_s32p; break;
1369  case AV_SAMPLE_FMT_S16P: s->iir_channel = iir_ch_lattice_s16p; break;
1370  }
1371  }
1372 
1373  return 0;
1374 }
1375 
1376 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
1377 {
1378  AVFilterContext *ctx = inlink->dst;
1379  AudioIIRContext *s = ctx->priv;
1380  AVFilterLink *outlink = ctx->outputs[0];
1381  ThreadData td;
1382  AVFrame *out;
1383  int ch, ret;
1384 
1385  if (av_frame_is_writable(in) && s->process != 2) {
1386  out = in;
1387  } else {
1388  out = ff_get_audio_buffer(outlink, in->nb_samples);
1389  if (!out) {
1390  av_frame_free(&in);
1391  return AVERROR(ENOMEM);
1392  }
1393  av_frame_copy_props(out, in);
1394  }
1395 
1396  td.in = in;
1397  td.out = out;
1398  ff_filter_execute(ctx, s->iir_channel, &td, NULL, outlink->ch_layout.nb_channels);
1399 
1400  for (ch = 0; ch < outlink->ch_layout.nb_channels; ch++) {
1401  if (s->iir[ch].clippings > 0)
1402  av_log(ctx, AV_LOG_WARNING, "Channel %d clipping %d times. Please reduce gain.\n",
1403  ch, s->iir[ch].clippings);
1404  s->iir[ch].clippings = 0;
1405  }
1406 
1407  if (in != out)
1408  av_frame_free(&in);
1409 
1410  if (s->response) {
1411  AVFilterLink *outlink = ctx->outputs[1];
1412  int64_t old_pts = s->video->pts;
1413  int64_t new_pts = av_rescale_q(out->pts, ctx->inputs[0]->time_base, outlink->time_base);
1414 
1415  if (new_pts > old_pts) {
1416  AVFrame *clone;
1417 
1418  s->video->pts = new_pts;
1419  clone = av_frame_clone(s->video);
1420  if (!clone)
1421  return AVERROR(ENOMEM);
1422  ret = ff_filter_frame(outlink, clone);
1423  if (ret < 0)
1424  return ret;
1425  }
1426  }
1427 
1428  return ff_filter_frame(outlink, out);
1429 }
1430 
1431 static int config_video(AVFilterLink *outlink)
1432 {
1433  FilterLink *l = ff_filter_link(outlink);
1434  AVFilterContext *ctx = outlink->src;
1435  AudioIIRContext *s = ctx->priv;
1436 
1437  outlink->sample_aspect_ratio = (AVRational){1,1};
1438  outlink->w = s->w;
1439  outlink->h = s->h;
1440  l->frame_rate = s->rate;
1441  outlink->time_base = av_inv_q(l->frame_rate);
1442 
1443  return 0;
1444 }
1445 
1446 static av_cold int init(AVFilterContext *ctx)
1447 {
1448  AudioIIRContext *s = ctx->priv;
1449  AVFilterPad pad, vpad;
1450  int ret;
1451 
1452  if (!s->a_str || !s->b_str || !s->g_str) {
1453  av_log(ctx, AV_LOG_ERROR, "Valid coefficients are mandatory.\n");
1454  return AVERROR(EINVAL);
1455  }
1456 
1457  switch (s->precision) {
1458  case 0: s->sample_format = AV_SAMPLE_FMT_DBLP; break;
1459  case 1: s->sample_format = AV_SAMPLE_FMT_FLTP; break;
1460  case 2: s->sample_format = AV_SAMPLE_FMT_S32P; break;
1461  case 3: s->sample_format = AV_SAMPLE_FMT_S16P; break;
1462  default: return AVERROR_BUG;
1463  }
1464 
1465  pad = (AVFilterPad){
1466  .name = "default",
1467  .type = AVMEDIA_TYPE_AUDIO,
1468  .config_props = config_output,
1469  };
1470 
1471  ret = ff_append_outpad(ctx, &pad);
1472  if (ret < 0)
1473  return ret;
1474 
1475  if (s->response) {
1476  vpad = (AVFilterPad){
1477  .name = "filter_response",
1478  .type = AVMEDIA_TYPE_VIDEO,
1479  .config_props = config_video,
1480  };
1481 
1482  ret = ff_append_outpad(ctx, &vpad);
1483  if (ret < 0)
1484  return ret;
1485  }
1486 
1487  return 0;
1488 }
1489 
1490 static av_cold void uninit(AVFilterContext *ctx)
1491 {
1492  AudioIIRContext *s = ctx->priv;
1493  int ch;
1494 
1495  if (s->iir) {
1496  for (ch = 0; ch < s->channels; ch++) {
1497  IIRChannel *iir = &s->iir[ch];
1498  av_freep(&iir->ab[0]);
1499  av_freep(&iir->ab[1]);
1500  av_freep(&iir->cache[0]);
1501  av_freep(&iir->cache[1]);
1502  av_freep(&iir->biquads);
1503  }
1504  }
1505  av_freep(&s->iir);
1506 
1507  av_frame_free(&s->video);
1508 }
1509 
1510 static const AVFilterPad inputs[] = {
1511  {
1512  .name = "default",
1513  .type = AVMEDIA_TYPE_AUDIO,
1514  .filter_frame = filter_frame,
1515  },
1516 };
1517 
1518 #define OFFSET(x) offsetof(AudioIIRContext, x)
1519 #define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1520 #define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
1521 
1522 static const AVOption aiir_options[] = {
1523  { "zeros", "set B/numerator/zeros/reflection coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1524  { "z", "set B/numerator/zeros/reflection coefficients", OFFSET(b_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1525  { "poles", "set A/denominator/poles/ladder coefficients", OFFSET(a_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1526  { "p", "set A/denominator/poles/ladder coefficients", OFFSET(a_str), AV_OPT_TYPE_STRING, {.str="1+0i 1-0i"}, 0, 0, AF },
1527  { "gains", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
1528  { "k", "set channels gains", OFFSET(g_str), AV_OPT_TYPE_STRING, {.str="1|1"}, 0, 0, AF },
1529  { "dry", "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1530  { "wet", "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1531  { "format", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, -2, 4, AF, .unit = "format" },
1532  { "f", "set coefficients format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=1}, -2, 4, AF, .unit = "format" },
1533  { "ll", "lattice-ladder function", 0, AV_OPT_TYPE_CONST, {.i64=-2}, 0, 0, AF, .unit = "format" },
1534  { "sf", "analog transfer function", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, AF, .unit = "format" },
1535  { "tf", "digital transfer function", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, .unit = "format" },
1536  { "zp", "Z-plane zeros/poles", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, .unit = "format" },
1537  { "pr", "Z-plane zeros/poles (polar radians)", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, .unit = "format" },
1538  { "pd", "Z-plane zeros/poles (polar degrees)", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, .unit = "format" },
1539  { "sp", "S-plane zeros/poles", 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, AF, .unit = "format" },
1540  { "process", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, AF, .unit = "process" },
1541  { "r", "set kind of processing", OFFSET(process), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, AF, .unit = "process" },
1542  { "d", "direct", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, .unit = "process" },
1543  { "s", "serial", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, .unit = "process" },
1544  { "p", "parallel", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, .unit = "process" },
1545  { "precision", "set filtering precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, .unit = "precision" },
1546  { "e", "set precision", OFFSET(precision),AV_OPT_TYPE_INT, {.i64=0}, 0, 3, AF, .unit = "precision" },
1547  { "dbl", "double-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, .unit = "precision" },
1548  { "flt", "single-precision floating-point", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, .unit = "precision" },
1549  { "i32", "32-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, .unit = "precision" },
1550  { "i16", "16-bit integers", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, AF, .unit = "precision" },
1551  { "normalize", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
1552  { "n", "normalize coefficients", OFFSET(normalize),AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
1553  { "mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, AF },
1554  { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF },
1555  { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF },
1556  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF },
1557  { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF },
1558  { NULL },
1559 };
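/*
 * Illustrative usage (the coefficient values below are arbitrary examples,
 * not taken from this file): filter with two conjugate zeros and two
 * conjugate poles given in Z-plane form, processed as cascaded biquads:
 *   ffmpeg -i in.wav -af "aiir=z=0.7+0.3i 0.7-0.3i:p=0.9+0.2i 0.9-0.2i:k=0.5:f=zp:r=s" out.wav
 */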
1560 
1561 AVFILTER_DEFINE_CLASS(aiir);
1562 
1563 const AVFilter ff_af_aiir = {
1564  .name = "aiir",
1565  .description = NULL_IF_CONFIG_SMALL("Apply Infinite Impulse Response filter with supplied coefficients."),
1566  .priv_size = sizeof(AudioIIRContext),
1567  .priv_class = &aiir_class,
1568  .init = init,
1569  .uninit = uninit,
1570  FILTER_INPUTS(inputs),
1571  FILTER_QUERY_FUNC2(query_formats),
1572  .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS |
1573  AVFILTER_FLAG_SLICE_THREADS,
1574 };
coef_sf2zf
static double coef_sf2zf(double *a, int N, int n)
Definition: af_aiir.c:942
Pair
Definition: af_aiir.c:38
M
#define M(a, b)
Definition: vp3dsp.c:48
AudioIIRContext::format
int format
Definition: af_aiir.c:64
formats
formats
Definition: signature.h:47
ff_get_video_buffer
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
Definition: video.c:116
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:98
AV_SAMPLE_FMT_FLTP
@ AV_SAMPLE_FMT_FLTP
float, planar
Definition: samplefmt.h:66
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
process
static void process(NormalizeContext *s, AVFrame *in, AVFrame *out)
Definition: vf_normalize.c:155
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
W
@ W
Definition: vf_addroi.c:27
av_clip
#define av_clip
Definition: common.h:100
mix
static int mix(int c0, int c1)
Definition: 4xm.c:716
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:435
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:422
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(aiir)
out
FILE * out
Definition: movenc.c:55
color
Definition: vf_paletteuse.c:513
IIRChannel::clippings
int clippings
Definition: af_aiir.c:55
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1061
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
AV_OPT_TYPE_VIDEO_RATE
@ AV_OPT_TYPE_VIDEO_RATE
Underlying C type is AVRational.
Definition: opt.h:315
AF
#define AF
Definition: af_aiir.c:1519
inputs
static const AVFilterPad inputs[]
Definition: af_aiir.c:1510
matrix
Definition: vc1dsp.c:43
IIRChannel::nb_ab
int nb_ab[2]
Definition: af_aiir.c:49
int64_t
long long int64_t
Definition: coverity.c:34
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
BiquadContext::a
double a[3]
Definition: af_aiir.c:43
aiir_options
static const AVOption aiir_options[]
Definition: af_aiir.c:1522
convert_serial2parallel
static int convert_serial2parallel(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:816
mask
int mask
Definition: mediacodecdec_common.c:154
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:162
RE
#define RE(x, ch)
FILTER_INPUTS
#define FILTER_INPUTS(array)
Definition: filters.h:262
read_channels
static int read_channels(AVFilterContext *ctx, int channels, uint8_t *item_str, int ab)
Definition: af_aiir.c:446
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:389
AudioIIRContext::ir_channel
int ir_channel
Definition: af_aiir.c:69
IIRChannel::biquads
BiquadContext * biquads
Definition: af_aiir.c:54
w
uint8_t w
Definition: llviddspenc.c:38
query_formats
static int query_formats(const AVFilterContext *ctx, AVFilterFormatsConfig **cfg_in, AVFilterFormatsConfig **cfg_out)
Definition: af_aiir.c:81
AVOption
AVOption.
Definition: opt.h:429
b
#define b
Definition: input.c:41
IIR_CH
#define IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:111
AV_SAMPLE_FMT_S32P
@ AV_SAMPLE_FMT_S32P
signed 32 bits, planar
Definition: samplefmt.h:65
read_tf_coefficients
static int read_tf_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst)
Definition: af_aiir.c:394
AudioIIRContext::iir_channel
int(* iir_channel)(AVFilterContext *ctx, void *arg, int ch, int nb_jobs)
Definition: af_aiir.c:78
check_stability
static void check_stability(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:1016
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:225
config_output
static int config_output(AVFilterLink *outlink)
Definition: af_aiir.c:1253
float.h
max
#define max(a, b)
Definition: cuda_runtime.h:33
AudioIIRContext::b_str
char * b_str
Definition: af_aiir.c:60
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
solve
static void solve(double *matrix, double *vector, int n, double *y, double *x, double *lu)
Definition: af_aiir.c:782
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:205
ThreadData::out
AVFrame * out
Definition: af_adeclick.c:526
config_video
static int config_video(AVFilterLink *outlink)
Definition: af_aiir.c:1431
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:327
AudioIIRContext::video
AVFrame * video
Definition: af_aiir.c:72
video.h
ThreadData::in
AVFrame * in
Definition: af_adecorrelate.c:155
AudioIIRContext::g_str
char * g_str
Definition: af_aiir.c:60
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:410
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
formats.h
BiquadContext
Definition: af_aiir.c:42
b1
static double b1(void *priv, double x, double y)
Definition: vf_xfade.c:2034
fail
#define fail()
Definition: checkasm.h:189
a2
static double a2(void *priv, double x, double y)
Definition: vf_xfade.c:2030
AVFilterPad
A filter pad used for either input or output.
Definition: filters.h:38
AudioIIRContext::h
int h
Definition: af_aiir.c:68
AudioIIRContext::process
int process
Definition: af_aiir.c:65
ceil
static __device__ float ceil(float a)
Definition: cuda_runtime.h:176
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
av_cold
#define av_cold
Definition: attributes.h:90
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: af_aiir.c:1376
convert_zp2tf
static int convert_zp2tf(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:554
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:198
AV_OPT_TYPE_DOUBLE
@ AV_OPT_TYPE_DOUBLE
Underlying C type is double.
Definition: opt.h:267
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
ff_formats_ref
int ff_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
Add *ref as a new reference to formats.
Definition: formats.c:678
av_strtok
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
Definition: avstring.c:178
filters.h
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
ctx
AVFormatContext * ctx
Definition: movenc.c:49
channels
channels
Definition: aptx.h:31
av_frame_clone
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:609
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
IIRChannel::cache
double * cache[2]
Definition: af_aiir.c:52
NAN
#define NAN
Definition: mathematics.h:115
SERIAL_IIR_CH
#define SERIAL_IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:167
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
av_sscanf
int av_sscanf(const char *string, const char *format,...)
See libc sscanf manual for more information.
Definition: avsscanf.c:961
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:75
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
NULL
#define NULL
Definition: coverity.c:32
biquad
@ biquad
Definition: af_biquads.c:78
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:725
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
isnan
#define isnan(x)
Definition: libm.h:340
AV_OPT_TYPE_IMAGE_SIZE
@ AV_OPT_TYPE_IMAGE_SIZE
Underlying C type is two consecutive integers.
Definition: opt.h:303
double
double
Definition: af_crystalizer.c:132
AudioIIRContext::rate
AVRational rate
Definition: af_aiir.c:70
normalize_coeffs
static void normalize_coeffs(AVFilterContext *ctx, int ch)
Definition: af_aiir.c:526
exp
int8_t exp
Definition: eval.c:73
init
static av_cold int init(AVFilterContext *ctx)
Definition: af_aiir.c:1446
AVFilterFormatsConfig
Lists of formats / etc.
Definition: avfilter.h:111
ff_filter_link
static FilterLink * ff_filter_link(AVFilterLink *link)
Definition: filters.h:197
convert_sp2zp
static void convert_sp2zp(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:908
AVFILTER_FLAG_DYNAMIC_OUTPUTS
#define AVFILTER_FLAG_DYNAMIC_OUTPUTS
The number of the filter outputs is not determined just by AVFilter.outputs.
Definition: avfilter.h:147
BiquadContext::w1
double w1
Definition: af_aiir.c:45
BiquadContext::w2
double w2
Definition: af_aiir.c:45
format
static const char *const format[]
Definition: af_aiir.c:444
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:94
draw_line
static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
Definition: af_aiir.c:1058
fmin
double fmin(double, double)
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
hypot
static av_const double hypot(double x, double y)
Definition: libm.h:366
av_frame_is_writable
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:661
read_zp_coefficients
static int read_zp_coefficients(AVFilterContext *ctx, char *item_str, int nb_items, double *dst, const char *format)
Definition: af_aiir.c:419
IM
#define IM(x, ch)
b2
static double b2(void *priv, double x, double y)
Definition: vf_xfade.c:2035
a
The reader does not expect b to be semantically negative here, and if the code is later changed, by maybe adding a division or other operation, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int; to use the same example, SUINT a
Definition: undefined.txt:41
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:263
xga_font_data.h
N
#define N
Definition: af_mcompand.c:54
fact
static double fact(double i)
Definition: af_aiir.c:935
IIRChannel::g
double g
Definition: af_aiir.c:51
Pair::b
int b
Definition: af_aiir.c:39
M_PI
#define M_PI
Definition: mathematics.h:67
AV_SAMPLE_FMT_S16P
@ AV_SAMPLE_FMT_S16P
signed 16 bits, planar
Definition: samplefmt.h:64
AudioIIRContext::a_str
char * a_str
Definition: af_aiir.c:60
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:469
VF
#define VF
Definition: af_aiir.c:1520
AudioIIRContext::sample_format
enum AVSampleFormat sample_format
Definition: af_aiir.c:76
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
OFFSET
#define OFFSET(x)
Definition: af_aiir.c:1518
IIRChannel::ab
double * ab[2]
Definition: af_aiir.c:50
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
ThreadData
Used for passing data between threads.
Definition: dsddec.c:71
FILTER_QUERY_FUNC2
#define FILTER_QUERY_FUNC2(func)
Definition: filters.h:239
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AudioIIRContext::precision
int precision
Definition: af_aiir.c:66
convert_sf2tf
static void convert_sf2tf(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:961
AVFilterPad::name
const char * name
Pad name.
Definition: filters.h:44
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
AudioIIRContext::wet_gain
double wet_gain
Definition: af_aiir.c:61
get_response
static void get_response(int channel, int format, double w, const double *b, const double *a, int nb_b, int nb_a, double *magnitude, double *phase)
Definition: af_aiir.c:1089
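For the transfer-function case, the response at angular frequency w is H(e^{jw}) = B(e^{jw}) / A(e^{jw}); a hedged sketch of that evaluation follows (the real get_response() additionally handles the zero/pole formats selected by its format argument):

#include <math.h>

/* Evaluate magnitude and phase of H(z) = B(z)/A(z) on the unit circle
 * at angular frequency w (radians per sample). */
static void tf_response(const double *b, int nb_b,
                        const double *a, int nb_a,
                        double w, double *magnitude, double *phase)
{
    double bre = 0., bim = 0., are = 0., aim = 0.;

    for (int k = 0; k < nb_b; k++) {
        bre += b[k] * cos(-k * w);
        bim += b[k] * sin(-k * w);
    }
    for (int k = 0; k < nb_a; k++) {
        are += a[k] * cos(-k * w);
        aim += a[k] * sin(-k * w);
    }

    *magnitude = hypot(bre, bim) / hypot(are, aim);
    *phase     = atan2(bim, bre) - atan2(aim, are);
}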
AVFilter
Filter definition.
Definition: avfilter.h:201
cmul
static void cmul(double re, double im, double re2, double im2, double *RE, double *IM)
Definition: af_aiir.c:493
PARALLEL_IIR_CH
#define PARALLEL_IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:226
AudioIIRContext::dry_gain
double dry_gain
Definition: af_aiir.c:61
Pair::a
int a
Definition: af_aiir.c:39
fmax
double fmax(double, double)
ff_set_common_formats_from_list2
int ff_set_common_formats_from_list2(const AVFilterContext *ctx, AVFilterFormatsConfig **cfg_in, AVFilterFormatsConfig **cfg_out, const int *fmts)
Definition: formats.c:1016
convert_pr2zp
static void convert_pr2zp(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:881
BiquadContext::b
double b[3]
Definition: af_aiir.c:44
ff_filter_execute
int ff_filter_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, int *ret, int nb_jobs)
Definition: avfilter.c:1666
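ff_filter_execute() is the slice-threading entry point used by filters flagged with AVFILTER_FLAG_SLICE_THREADS; a hedged sketch of dispatching one job per channel, relying on the ThreadData and AudioIIRContext types declared in this file (the helper name and exact job count are illustrative):

/* Illustrative dispatch from filter_frame(): each job index is treated
 * as a channel number by the callback stored in s->iir_channel. */
static void dispatch_sketch(AVFilterContext *ctx, AudioIIRContext *s,
                            AVFrame *in, AVFrame *out)
{
    ThreadData td = { .in = in, .out = out };

    /* one job per channel; ret may be NULL when per-job status is unused */
    ff_filter_execute(ctx, s->iir_channel, &td, NULL, s->channels);
}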
draw_response
static void draw_response(AVFilterContext *ctx, AVFrame *out, int sample_rate)
Definition: af_aiir.c:1136
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Underlying C type is int.
Definition: opt.h:259
avfilter.h
AudioIIRContext::response
int response
Definition: af_aiir.c:67
distance
static double distance(double x0, double x1, double y0, double y1)
Definition: af_aiir.c:1084
AV_SAMPLE_FMT_DBLP
@ AV_SAMPLE_FMT_DBLP
double, planar
Definition: samplefmt.h:67
AudioIIRContext::channels
int channels
Definition: af_aiir.c:75
decompose_zp2biquads
static int decompose_zp2biquads(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:602
AVFilterContext
An instance of a filter.
Definition: avfilter.h:457
factor
static const int factor[16]
Definition: vf_pp7.c:80
AVFILTER_FLAG_SLICE_THREADS
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
Definition: avfilter.h:152
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
LATTICE_IIR_CH
#define LATTICE_IIR_CH(name, type, min, max, need_clipping)
Definition: af_aiir.c:290
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
mem.h
audio.h
count_coefficients
static void count_coefficients(char *item_str, int *nb_items)
Definition: af_aiir.c:347
biquad_process
static void biquad_process(double *x, double *y, int length, double b0, double b1, double b2, double a1, double a2)
Definition: af_aiir.c:764
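The parameter list above is the classic normalized biquad (a0 taken as 1); a transposed direct form II sketch of that recurrence, not necessarily identical to the implementation in this file:

/* Transposed direct form II, assuming a0 == 1:
 *   y[n] = b0*x[n] + w1
 *   w1   = b1*x[n] - a1*y[n] + w2
 *   w2   = b2*x[n] - a2*y[n]
 */
static void biquad_sketch(const double *x, double *y, int length,
                          double b0, double b1, double b2,
                          double a1, double a2)
{
    double w1 = 0., w2 = 0.;

    for (int n = 0; n < length; n++) {
        double out = b0 * x[n] + w1;

        w1 = b1 * x[n] - a1 * out + w2;
        w2 = b2 * x[n] - a2 * out;
        y[n] = out;
    }
}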
expand
static int expand(AVFilterContext *ctx, double *pz, int n, double *coefs)
Definition: af_aiir.c:499
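expand() builds polynomial coefficients from a list of roots, i.e. it multiplies out prod_k (z - r_k); a hedged sketch assuming the roots and coefficients are stored as interleaved {re, im} pairs (the storage layout is an assumption of this sketch):

#include <string.h>

/* Expand prod_k (z - r_k) for n complex roots into n + 1 complex
 * coefficients, lowest order first, all stored as interleaved re/im. */
static void expand_roots(const double *pz, int n, double *coefs)
{
    /* start with the constant polynomial 1 */
    memset(coefs, 0, 2 * (n + 1) * sizeof(*coefs));
    coefs[0] = 1.0;

    for (int k = 0; k < n; k++) {
        const double rre = pz[2 * k], rim = pz[2 * k + 1];

        /* multiply the current polynomial by (z - r_k) in place,
         * from the highest coefficient down: c'[j] = c[j-1] - r*c[j] */
        for (int j = k + 1; j >= 1; j--) {
            const double cre = coefs[2 * j],     cim = coefs[2 * j + 1];
            const double pre = coefs[2 * j - 2], pim = coefs[2 * j - 1];

            coefs[2 * j]     = pre - (rre * cre - rim * cim);
            coefs[2 * j + 1] = pim - (rre * cim + rim * cre);
        }

        /* constant term: c'[0] = -r * c[0] */
        {
            const double c0re = coefs[0], c0im = coefs[1];

            coefs[0] = -(rre * c0re - rim * c0im);
            coefs[1] = -(rre * c0im + rim * c0re);
        }
    }
}

As a sanity check, roots at +i and -i expand to 1 + z^2, i.e. coefficients {1, 0, 0, 0, 1, 0} in this layout.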
ff_append_outpad
int ff_append_outpad(AVFilterContext *f, AVFilterPad *p)
Definition: avfilter.c:138
avpriv_cga_font
const uint8_t avpriv_cga_font[2048]
Definition: xga_font_data.c:29
AudioIIRContext
Definition: af_aiir.c:58
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Underlying C type is int.
Definition: opt.h:327
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
AudioIIRContext::iir
IIRChannel * iir
Definition: af_aiir.c:74
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which typically indicates the size in bytes of each pict...
Definition: frame.h:434
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
b0
static double b0(void *priv, double x, double y)
Definition: vf_xfade.c:2033
a1
static double a1(void *priv, double x, double y)
Definition: vf_xfade.c:2029
IIRChannel::fir
double fir
Definition: af_aiir.c:53
ff_af_aiir
const AVFilter ff_af_aiir
Definition: af_aiir.c:1563
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Underlying C type is a uint8_t* that is either NULL or points to a C string allocated with the av_mal...
Definition: opt.h:276
AudioIIRContext::normalize
int normalize
Definition: af_aiir.c:63
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Special option type for declaring named constants.
Definition: opt.h:299
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_aiir.c:1490
snprintf
#define snprintf
Definition: snprintf.h:34
read_gains
static int read_gains(AVFilterContext *ctx, char *item_str, int nb_items)
Definition: af_aiir.c:361
IIRChannel
Definition: af_aiir.c:48
AudioIIRContext::mix
double mix
Definition: af_aiir.c:62
convert_pd2zp
static void convert_pd2zp(AVFilterContext *ctx, int channels)
Definition: af_aiir.c:989
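Assuming the 'pd' coefficient format denotes zeros/poles given as (magnitude, angle in degrees) pairs, the conversion to rectangular zero/pole form is the usual polar-to-Cartesian step; a minimal sketch (the helper name is illustrative):

#include <math.h>
#include "libavutil/mathematics.h"   /* for M_PI */

/* Convert one polar (magnitude, angle in degrees) pair to re/im. */
static void polar_deg_to_rect(double mag, double angle_deg,
                              double *re, double *im)
{
    const double theta = angle_deg * M_PI / 180.0;

    *re = mag * cos(theta);
    *im = mag * sin(theta);
}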
drawtext
static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
Definition: af_aiir.c:1035
channel
channel
Definition: ebur128.h:39
min
float min
Definition: vorbis_enc_data.h:429
AudioIIRContext::w
int w
Definition: af_aiir.c:68