FFmpeg
af_dynaudnorm.c
Go to the documentation of this file.
1 /*
2  * Dynamic Audio Normalizer
3  * Copyright (c) 2015 LoRd_MuldeR <mulder2@gmx.de>. Some rights reserved.
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Dynamic Audio Normalizer
25  */
26 
27 #include <float.h>
28 
29 #include "libavutil/avassert.h"
30 #include "libavutil/opt.h"
31 
32 #define FF_BUFQUEUE_SIZE 302
34 
35 #include "audio.h"
36 #include "avfilter.h"
37 #include "filters.h"
38 #include "internal.h"
39 
40 typedef struct cqueue {
41  double *elements;
42  int size;
44  int first;
45 } cqueue;
46 
48  const AVClass *class;
49 
50  struct FFBufQueue queue;
51 
52  int frame_len;
58 
59  double peak_value;
61  double target_rms;
66  double *fade_factors[2];
67  double *weights;
68 
69  int channels;
70  int delay;
71  int eof;
72  int64_t pts;
73 
77 
80 
81 #define OFFSET(x) offsetof(DynamicAudioNormalizerContext, x)
82 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
83 
static const AVOption dynaudnorm_options[] = {
    /* Each entry: option name, help text, offset of the target field in
     * DynamicAudioNormalizerContext, type, default value, min, max, flags. */
    { "f", "set the frame length in msec", OFFSET(frame_len_msec), AV_OPT_TYPE_INT, {.i64 = 500}, 10, 8000, FLAGS },
    { "g", "set the filter size", OFFSET(filter_size), AV_OPT_TYPE_INT, {.i64 = 31}, 3, 301, FLAGS },
    { "p", "set the peak value", OFFSET(peak_value), AV_OPT_TYPE_DOUBLE, {.dbl = 0.95}, 0.0, 1.0, FLAGS },
    { "m", "set the max amplification", OFFSET(max_amplification), AV_OPT_TYPE_DOUBLE, {.dbl = 10.0}, 1.0, 100.0, FLAGS },
    { "r", "set the target RMS", OFFSET(target_rms), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 1.0, FLAGS },
    { "n", "set channel coupling", OFFSET(channels_coupled), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
    { "c", "set DC correction", OFFSET(dc_correction), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "b", "set alternative boundary mode", OFFSET(alt_boundary_mode), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "s", "set the compress factor", OFFSET(compress_factor), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 30.0, FLAGS },
    { NULL }
};
96 
97 AVFILTER_DEFINE_CLASS(dynaudnorm);
98 
100 {
102 
103  if (!(s->filter_size & 1)) {
104  av_log(ctx, AV_LOG_ERROR, "filter size %d is invalid. Must be an odd value.\n", s->filter_size);
105  return AVERROR(EINVAL);
106  }
107 
108  return 0;
109 }
110 
112 {
115  static const enum AVSampleFormat sample_fmts[] = {
118  };
119  int ret;
120 
122  if (!layouts)
123  return AVERROR(ENOMEM);
125  if (ret < 0)
126  return ret;
127 
129  if (!formats)
130  return AVERROR(ENOMEM);
132  if (ret < 0)
133  return ret;
134 
136  if (!formats)
137  return AVERROR(ENOMEM);
139 }
140 
141 static inline int frame_size(int sample_rate, int frame_len_msec)
142 {
143  const int frame_size = lrint((double)sample_rate * (frame_len_msec / 1000.0));
144  return frame_size + (frame_size % 2);
145 }
146 
/**
 * Fill two complementary linear ramps used for cross-fading inside a frame:
 * fade_factors[0] descends towards 0.0 and fade_factors[1] ascends towards
 * 1.0, so that the two entries at any position always sum to exactly 1.0.
 */
static void precalculate_fade_factors(double *fade_factors[2], int frame_len)
{
    const double step = 1.0 / frame_len;
    int i;

    for (i = 0; i < frame_len; i++) {
        const double fade_out = 1.0 - (step * (i + 1.0));

        fade_factors[0][i] = fade_out;
        fade_factors[1][i] = 1.0 - fade_out;
    }
}
157 
159 {
160  cqueue *q;
161 
162  q = av_malloc(sizeof(cqueue));
163  if (!q)
164  return NULL;
165 
166  q->size = size;
167  q->nb_elements = 0;
168  q->first = 0;
169 
170  q->elements = av_malloc_array(size, sizeof(double));
171  if (!q->elements) {
172  av_free(q);
173  return NULL;
174  }
175 
176  return q;
177 }
178 
179 static void cqueue_free(cqueue *q)
180 {
181  if (q)
182  av_free(q->elements);
183  av_free(q);
184 }
185 
/* Return the number of elements currently stored in the circular queue. */
static int cqueue_size(cqueue *q)
{
    return q->nb_elements;
}
190 
191 static int cqueue_empty(cqueue *q)
192 {
193  return !q->nb_elements;
194 }
195 
196 static int cqueue_enqueue(cqueue *q, double element)
197 {
198  int i;
199 
200  av_assert2(q->nb_elements != q->size);
201 
202  i = (q->first + q->nb_elements) % q->size;
203  q->elements[i] = element;
204  q->nb_elements++;
205 
206  return 0;
207 }
208 
209 static double cqueue_peek(cqueue *q, int index)
210 {
211  av_assert2(index < q->nb_elements);
212  return q->elements[(q->first + index) % q->size];
213 }
214 
215 static int cqueue_dequeue(cqueue *q, double *element)
216 {
218 
219  *element = q->elements[q->first];
220  q->first = (q->first + 1) % q->size;
221  q->nb_elements--;
222 
223  return 0;
224 }
225 
226 static int cqueue_pop(cqueue *q)
227 {
229 
230  q->first = (q->first + 1) % q->size;
231  q->nb_elements--;
232 
233  return 0;
234 }
235 
237 {
238  double total_weight = 0.0;
239  const double sigma = (((s->filter_size / 2.0) - 1.0) / 3.0) + (1.0 / 3.0);
240  double adjust;
241  int i;
242 
243  // Pre-compute constants
244  const int offset = s->filter_size / 2;
245  const double c1 = 1.0 / (sigma * sqrt(2.0 * M_PI));
246  const double c2 = 2.0 * sigma * sigma;
247 
248  // Compute weights
249  for (i = 0; i < s->filter_size; i++) {
250  const int x = i - offset;
251 
252  s->weights[i] = c1 * exp(-x * x / c2);
253  total_weight += s->weights[i];
254  }
255 
256  // Adjust weights
257  adjust = 1.0 / total_weight;
258  for (i = 0; i < s->filter_size; i++) {
259  s->weights[i] *= adjust;
260  }
261 }
262 
264 {
266  int c;
267 
268  av_freep(&s->prev_amplification_factor);
269  av_freep(&s->dc_correction_value);
270  av_freep(&s->compress_threshold);
271  av_freep(&s->fade_factors[0]);
272  av_freep(&s->fade_factors[1]);
273 
274  for (c = 0; c < s->channels; c++) {
275  if (s->gain_history_original)
276  cqueue_free(s->gain_history_original[c]);
277  if (s->gain_history_minimum)
278  cqueue_free(s->gain_history_minimum[c]);
279  if (s->gain_history_smoothed)
280  cqueue_free(s->gain_history_smoothed[c]);
281  }
282 
283  av_freep(&s->gain_history_original);
284  av_freep(&s->gain_history_minimum);
285  av_freep(&s->gain_history_smoothed);
286 
287  cqueue_free(s->is_enabled);
288  s->is_enabled = NULL;
289 
290  av_freep(&s->weights);
291 
292  ff_bufqueue_discard_all(&s->queue);
293 }
294 
296 {
297  AVFilterContext *ctx = inlink->dst;
299  int c;
300 
301  uninit(ctx);
302 
303  s->frame_len = frame_size(inlink->sample_rate, s->frame_len_msec);
304  av_log(ctx, AV_LOG_DEBUG, "frame len %d\n", s->frame_len);
305 
306  s->fade_factors[0] = av_malloc_array(s->frame_len, sizeof(*s->fade_factors[0]));
307  s->fade_factors[1] = av_malloc_array(s->frame_len, sizeof(*s->fade_factors[1]));
308 
309  s->prev_amplification_factor = av_malloc_array(inlink->channels, sizeof(*s->prev_amplification_factor));
310  s->dc_correction_value = av_calloc(inlink->channels, sizeof(*s->dc_correction_value));
311  s->compress_threshold = av_calloc(inlink->channels, sizeof(*s->compress_threshold));
312  s->gain_history_original = av_calloc(inlink->channels, sizeof(*s->gain_history_original));
313  s->gain_history_minimum = av_calloc(inlink->channels, sizeof(*s->gain_history_minimum));
314  s->gain_history_smoothed = av_calloc(inlink->channels, sizeof(*s->gain_history_smoothed));
315  s->weights = av_malloc_array(s->filter_size, sizeof(*s->weights));
316  s->is_enabled = cqueue_create(s->filter_size);
317  if (!s->prev_amplification_factor || !s->dc_correction_value ||
318  !s->compress_threshold || !s->fade_factors[0] || !s->fade_factors[1] ||
319  !s->gain_history_original || !s->gain_history_minimum ||
320  !s->gain_history_smoothed || !s->is_enabled || !s->weights)
321  return AVERROR(ENOMEM);
322 
323  for (c = 0; c < inlink->channels; c++) {
324  s->prev_amplification_factor[c] = 1.0;
325 
326  s->gain_history_original[c] = cqueue_create(s->filter_size);
327  s->gain_history_minimum[c] = cqueue_create(s->filter_size);
328  s->gain_history_smoothed[c] = cqueue_create(s->filter_size);
329 
330  if (!s->gain_history_original[c] || !s->gain_history_minimum[c] ||
331  !s->gain_history_smoothed[c])
332  return AVERROR(ENOMEM);
333  }
334 
335  precalculate_fade_factors(s->fade_factors, s->frame_len);
337 
338  s->channels = inlink->channels;
339  s->delay = s->filter_size;
340 
341  return 0;
342 }
343 
/**
 * Linearly cross-fade between prev and next at sample position pos,
 * weighting each by the precomputed complementary ramps in fade_factors.
 */
static inline double fade(double prev, double next, int pos,
                          double *fade_factors[2])
{
    const double w_prev = fade_factors[0][pos];
    const double w_next = fade_factors[1][pos];

    return w_prev * prev + w_next * next;
}
349 
/* Square a value; exact and cheaper than calling pow(value, 2.0). */
static inline double pow_2(const double value)
{
    const double squared = value * value;

    return squared;
}
354 
355 static inline double bound(const double threshold, const double val)
356 {
357  const double CONST = 0.8862269254527580136490837416705725913987747280611935; //sqrt(PI) / 2.0
358  return erf(CONST * (val / threshold)) * threshold;
359 }
360 
362 {
363  double max = DBL_EPSILON;
364  int c, i;
365 
366  if (channel == -1) {
367  for (c = 0; c < frame->channels; c++) {
368  double *data_ptr = (double *)frame->extended_data[c];
369 
370  for (i = 0; i < frame->nb_samples; i++)
371  max = FFMAX(max, fabs(data_ptr[i]));
372  }
373  } else {
374  double *data_ptr = (double *)frame->extended_data[channel];
375 
376  for (i = 0; i < frame->nb_samples; i++)
377  max = FFMAX(max, fabs(data_ptr[i]));
378  }
379 
380  return max;
381 }
382 
384 {
385  double rms_value = 0.0;
386  int c, i;
387 
388  if (channel == -1) {
389  for (c = 0; c < frame->channels; c++) {
390  const double *data_ptr = (double *)frame->extended_data[c];
391 
392  for (i = 0; i < frame->nb_samples; i++) {
393  rms_value += pow_2(data_ptr[i]);
394  }
395  }
396 
397  rms_value /= frame->nb_samples * frame->channels;
398  } else {
399  const double *data_ptr = (double *)frame->extended_data[channel];
400  for (i = 0; i < frame->nb_samples; i++) {
401  rms_value += pow_2(data_ptr[i]);
402  }
403 
404  rms_value /= frame->nb_samples;
405  }
406 
407  return FFMAX(sqrt(rms_value), DBL_EPSILON);
408 }
409 
411  int channel)
412 {
413  const double maximum_gain = s->peak_value / find_peak_magnitude(frame, channel);
414  const double rms_gain = s->target_rms > DBL_EPSILON ? (s->target_rms / compute_frame_rms(frame, channel)) : DBL_MAX;
415  return bound(s->max_amplification, FFMIN(maximum_gain, rms_gain));
416 }
417 
418 static double minimum_filter(cqueue *q)
419 {
420  double min = DBL_MAX;
421  int i;
422 
423  for (i = 0; i < cqueue_size(q); i++) {
424  min = FFMIN(min, cqueue_peek(q, i));
425  }
426 
427  return min;
428 }
429 
431 {
432  double result = 0.0;
433  int i;
434 
435  for (i = 0; i < cqueue_size(q); i++) {
436  result += cqueue_peek(q, i) * s->weights[i];
437  }
438 
439  return result;
440 }
441 
443  double current_gain_factor)
444 {
445  if (cqueue_empty(s->gain_history_original[channel]) ||
446  cqueue_empty(s->gain_history_minimum[channel])) {
447  const int pre_fill_size = s->filter_size / 2;
448  const double initial_value = s->alt_boundary_mode ? current_gain_factor : 1.0;
449 
450  s->prev_amplification_factor[channel] = initial_value;
451 
452  while (cqueue_size(s->gain_history_original[channel]) < pre_fill_size) {
453  cqueue_enqueue(s->gain_history_original[channel], initial_value);
454  }
455  }
456 
457  cqueue_enqueue(s->gain_history_original[channel], current_gain_factor);
458 
459  while (cqueue_size(s->gain_history_original[channel]) >= s->filter_size) {
460  double minimum;
461  av_assert0(cqueue_size(s->gain_history_original[channel]) == s->filter_size);
462 
463  if (cqueue_empty(s->gain_history_minimum[channel])) {
464  const int pre_fill_size = s->filter_size / 2;
465  double initial_value = s->alt_boundary_mode ? cqueue_peek(s->gain_history_original[channel], 0) : 1.0;
466  int input = pre_fill_size;
467 
468  while (cqueue_size(s->gain_history_minimum[channel]) < pre_fill_size) {
469  input++;
470  initial_value = FFMIN(initial_value, cqueue_peek(s->gain_history_original[channel], input));
471  cqueue_enqueue(s->gain_history_minimum[channel], initial_value);
472  }
473  }
474 
475  minimum = minimum_filter(s->gain_history_original[channel]);
476 
477  cqueue_enqueue(s->gain_history_minimum[channel], minimum);
478 
479  cqueue_pop(s->gain_history_original[channel]);
480  }
481 
482  while (cqueue_size(s->gain_history_minimum[channel]) >= s->filter_size) {
483  double smoothed;
484  av_assert0(cqueue_size(s->gain_history_minimum[channel]) == s->filter_size);
485  smoothed = gaussian_filter(s, s->gain_history_minimum[channel]);
486 
487  cqueue_enqueue(s->gain_history_smoothed[channel], smoothed);
488 
489  cqueue_pop(s->gain_history_minimum[channel]);
490  }
491 }
492 
/**
 * Blend a new sample into an old value (exponential-style smoothing).
 * aggressiveness must lie in [0, 1]: 0 keeps the old value, 1 takes the new.
 */
static inline double update_value(double new, double old, double aggressiveness)
{
    const double keep = 1.0 - aggressiveness;

    av_assert0(aggressiveness >= 0.0 && aggressiveness <= 1.0);

    return keep * old + aggressiveness * new;
}
498 
500 {
501  const double diff = 1.0 / frame->nb_samples;
502  int is_first_frame = cqueue_empty(s->gain_history_original[0]);
503  int c, i;
504 
505  for (c = 0; c < s->channels; c++) {
506  double *dst_ptr = (double *)frame->extended_data[c];
507  double current_average_value = 0.0;
508  double prev_value;
509 
510  for (i = 0; i < frame->nb_samples; i++)
511  current_average_value += dst_ptr[i] * diff;
512 
513  prev_value = is_first_frame ? current_average_value : s->dc_correction_value[c];
514  s->dc_correction_value[c] = is_first_frame ? current_average_value : update_value(current_average_value, s->dc_correction_value[c], 0.1);
515 
516  for (i = 0; i < frame->nb_samples; i++) {
517  dst_ptr[i] -= fade(prev_value, s->dc_correction_value[c], i, s->fade_factors);
518  }
519  }
520 }
521 
/**
 * Search for the largest threshold whose soft-clipped peak, bound(t, 1.0),
 * still does not exceed the requested threshold. Works by repeatedly
 * advancing by a step while the condition holds, halving the step each
 * round (a binary-refinement ascent). Thresholds at or outside the open
 * interval (0, 1) are returned unchanged.
 */
static double setup_compress_thresh(double threshold)
{
    double thresh, step;

    if (threshold <= DBL_EPSILON || threshold >= 1.0 - DBL_EPSILON)
        return threshold;

    thresh = threshold;
    step   = 1.0;

    while (step > DBL_EPSILON) {
        /* the llrint() comparison detects when adding the step no longer
         * changes the scaled representable value, i.e. further progress at
         * this step size is impossible */
        while ((llrint((thresh + step) * (UINT64_C(1) << 63)) >
                llrint(thresh * (UINT64_C(1) << 63))) &&
               (bound(thresh + step, 1.0) <= threshold)) {
            thresh += step;
        }

        step /= 2.0;
    }

    return thresh;
}
543 
545  AVFrame *frame, int channel)
546 {
547  double variance = 0.0;
548  int i, c;
549 
550  if (channel == -1) {
551  for (c = 0; c < s->channels; c++) {
552  const double *data_ptr = (double *)frame->extended_data[c];
553 
554  for (i = 0; i < frame->nb_samples; i++) {
555  variance += pow_2(data_ptr[i]); // Assume that MEAN is *zero*
556  }
557  }
558  variance /= (s->channels * frame->nb_samples) - 1;
559  } else {
560  const double *data_ptr = (double *)frame->extended_data[channel];
561 
562  for (i = 0; i < frame->nb_samples; i++) {
563  variance += pow_2(data_ptr[i]); // Assume that MEAN is *zero*
564  }
565  variance /= frame->nb_samples - 1;
566  }
567 
568  return FFMAX(sqrt(variance), DBL_EPSILON);
569 }
570 
572 {
573  int is_first_frame = cqueue_empty(s->gain_history_original[0]);
574  int c, i;
575 
576  if (s->channels_coupled) {
577  const double standard_deviation = compute_frame_std_dev(s, frame, -1);
578  const double current_threshold = FFMIN(1.0, s->compress_factor * standard_deviation);
579 
580  const double prev_value = is_first_frame ? current_threshold : s->compress_threshold[0];
581  double prev_actual_thresh, curr_actual_thresh;
582  s->compress_threshold[0] = is_first_frame ? current_threshold : update_value(current_threshold, s->compress_threshold[0], (1.0/3.0));
583 
584  prev_actual_thresh = setup_compress_thresh(prev_value);
585  curr_actual_thresh = setup_compress_thresh(s->compress_threshold[0]);
586 
587  for (c = 0; c < s->channels; c++) {
588  double *const dst_ptr = (double *)frame->extended_data[c];
589  for (i = 0; i < frame->nb_samples; i++) {
590  const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, s->fade_factors);
591  dst_ptr[i] = copysign(bound(localThresh, fabs(dst_ptr[i])), dst_ptr[i]);
592  }
593  }
594  } else {
595  for (c = 0; c < s->channels; c++) {
596  const double standard_deviation = compute_frame_std_dev(s, frame, c);
597  const double current_threshold = setup_compress_thresh(FFMIN(1.0, s->compress_factor * standard_deviation));
598 
599  const double prev_value = is_first_frame ? current_threshold : s->compress_threshold[c];
600  double prev_actual_thresh, curr_actual_thresh;
601  double *dst_ptr;
602  s->compress_threshold[c] = is_first_frame ? current_threshold : update_value(current_threshold, s->compress_threshold[c], 1.0/3.0);
603 
604  prev_actual_thresh = setup_compress_thresh(prev_value);
605  curr_actual_thresh = setup_compress_thresh(s->compress_threshold[c]);
606 
607  dst_ptr = (double *)frame->extended_data[c];
608  for (i = 0; i < frame->nb_samples; i++) {
609  const double localThresh = fade(prev_actual_thresh, curr_actual_thresh, i, s->fade_factors);
610  dst_ptr[i] = copysign(bound(localThresh, fabs(dst_ptr[i])), dst_ptr[i]);
611  }
612  }
613  }
614 }
615 
617 {
618  if (s->dc_correction) {
620  }
621 
622  if (s->compress_factor > DBL_EPSILON) {
624  }
625 
626  if (s->channels_coupled) {
627  const double current_gain_factor = get_max_local_gain(s, frame, -1);
628  int c;
629 
630  for (c = 0; c < s->channels; c++)
631  update_gain_history(s, c, current_gain_factor);
632  } else {
633  int c;
634 
635  for (c = 0; c < s->channels; c++)
637  }
638 }
639 
641 {
642  int c, i;
643 
644  for (c = 0; c < s->channels; c++) {
645  double *dst_ptr = (double *)frame->extended_data[c];
646  double current_amplification_factor;
647 
648  cqueue_dequeue(s->gain_history_smoothed[c], &current_amplification_factor);
649 
650  for (i = 0; i < frame->nb_samples && enabled; i++) {
651  const double amplification_factor = fade(s->prev_amplification_factor[c],
652  current_amplification_factor, i,
653  s->fade_factors);
654 
655  dst_ptr[i] *= amplification_factor;
656 
657  if (fabs(dst_ptr[i]) > s->peak_value)
658  dst_ptr[i] = copysign(s->peak_value, dst_ptr[i]);
659  }
660 
661  s->prev_amplification_factor[c] = current_amplification_factor;
662  }
663 }
664 
666 {
667  AVFilterContext *ctx = inlink->dst;
669  AVFilterLink *outlink = inlink->dst->outputs[0];
670  int ret = 1;
671 
672  if (!cqueue_empty(s->gain_history_smoothed[0])) {
673  double is_enabled;
674  AVFrame *out = ff_bufqueue_get(&s->queue);
675 
676  cqueue_dequeue(s->is_enabled, &is_enabled);
677 
678  amplify_frame(s, out, is_enabled > 0.);
679  ret = ff_filter_frame(outlink, out);
680  }
681 
683  cqueue_enqueue(s->is_enabled, !ctx->is_disabled);
684  analyze_frame(s, in);
685  ff_bufqueue_add(ctx, &s->queue, in);
686 
687  return ret;
688 }
689 
691  AVFilterLink *outlink)
692 {
693  AVFrame *out = ff_get_audio_buffer(outlink, s->frame_len);
694  int c, i;
695 
696  if (!out)
697  return AVERROR(ENOMEM);
698 
699  for (c = 0; c < s->channels; c++) {
700  double *dst_ptr = (double *)out->extended_data[c];
701 
702  for (i = 0; i < out->nb_samples; i++) {
703  dst_ptr[i] = s->alt_boundary_mode ? DBL_EPSILON : ((s->target_rms > DBL_EPSILON) ? FFMIN(s->peak_value, s->target_rms) : s->peak_value);
704  if (s->dc_correction) {
705  dst_ptr[i] *= ((i % 2) == 1) ? -1 : 1;
706  dst_ptr[i] += s->dc_correction_value[c];
707  }
708  }
709  }
710 
711  s->delay--;
712  return filter_frame(inlink, out);
713 }
714 
715 static int flush(AVFilterLink *outlink)
716 {
717  AVFilterContext *ctx = outlink->src;
719  int ret = 0;
720 
721  if (!cqueue_empty(s->gain_history_smoothed[0])) {
722  ret = flush_buffer(s, ctx->inputs[0], outlink);
723  } else if (s->queue.available) {
724  AVFrame *out = ff_bufqueue_get(&s->queue);
725 
726  s->pts = out->pts;
727  ret = ff_filter_frame(outlink, out);
728  s->delay = s->queue.available;
729  }
730 
731  return ret;
732 }
733 
735 {
736  AVFilterLink *inlink = ctx->inputs[0];
737  AVFilterLink *outlink = ctx->outputs[0];
739  AVFrame *in = NULL;
740  int ret = 0, status;
741  int64_t pts;
742 
744 
745  if (!s->eof) {
746  ret = ff_inlink_consume_samples(inlink, s->frame_len, s->frame_len, &in);
747  if (ret < 0)
748  return ret;
749  if (ret > 0) {
751  if (ret <= 0)
752  return ret;
753  }
754 
755  if (ff_inlink_queued_samples(inlink) >= s->frame_len) {
757  return 0;
758  }
759  }
760 
761  if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
762  if (status == AVERROR_EOF)
763  s->eof = 1;
764  }
765 
766  if (s->eof && s->delay > 0)
767  return flush(outlink);
768 
769  if (s->eof && s->delay <= 0) {
770  ff_outlink_set_status(outlink, AVERROR_EOF, s->pts);
771  return 0;
772  }
773 
774  if (!s->eof)
776 
777  return FFERROR_NOT_READY;
778 }
779 
781  {
782  .name = "default",
783  .type = AVMEDIA_TYPE_AUDIO,
784  .config_props = config_input,
785  },
786  { NULL }
787 };
788 
790  {
791  .name = "default",
792  .type = AVMEDIA_TYPE_AUDIO,
793  },
794  { NULL }
795 };
796 
798  .name = "dynaudnorm",
799  .description = NULL_IF_CONFIG_SMALL("Dynamic Audio Normalizer."),
800  .query_formats = query_formats,
801  .priv_size = sizeof(DynamicAudioNormalizerContext),
802  .init = init,
803  .uninit = uninit,
804  .activate = activate,
807  .priv_class = &dynaudnorm_class,
809 };
config_input
static int config_input(AVFilterLink *inlink)
Definition: af_dynaudnorm.c:295
formats
formats
Definition: signature.h:48
ff_get_audio_buffer
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
Definition: audio.c:86
AVFilterChannelLayouts
A list of supported channel layouts.
Definition: formats.h:85
flush_buffer
static int flush_buffer(DynamicAudioNormalizerContext *s, AVFilterLink *inlink, AVFilterLink *outlink)
Definition: af_dynaudnorm.c:690
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
OFFSET
#define OFFSET(x)
Definition: af_dynaudnorm.c:81
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_make_format_list
AVFilterFormats * ff_make_format_list(const int *fmts)
Create a list of supported formats.
Definition: formats.c:283
update_gain_history
static void update_gain_history(DynamicAudioNormalizerContext *s, int channel, double current_gain_factor)
Definition: af_dynaudnorm.c:442
out
FILE * out
Definition: movenc.c:54
ff_set_common_channel_layouts
int ff_set_common_channel_layouts(AVFilterContext *ctx, AVFilterChannelLayouts *layouts)
A helper for query_formats() which sets all links to the same list of channel layouts/sample rates.
Definition: formats.c:549
ff_filter_frame
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
Definition: avfilter.c:1080
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:686
layouts
enum MovChannelLayoutTag * layouts
Definition: mov_chan.c:434
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:55
FFERROR_NOT_READY
return FFERROR_NOT_READY
Definition: filter_design.txt:204
analyze_frame
static void analyze_frame(DynamicAudioNormalizerContext *s, AVFrame *frame)
Definition: af_dynaudnorm.c:616
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(dynaudnorm)
inlink
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
Definition: filter_design.txt:212
fade
static double fade(double prev, double next, int pos, double *fade_factors[2])
Definition: af_dynaudnorm.c:344
ff_all_channel_counts
AVFilterChannelLayouts * ff_all_channel_counts(void)
Construct an AVFilterChannelLayouts coding for any channel layout, with known or unknown disposition.
Definition: formats.c:410
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
av_frame_make_writable
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
Definition: frame.c:611
init
static av_cold int init(AVFilterContext *ctx)
Definition: af_dynaudnorm.c:99
AVOption
AVOption.
Definition: opt.h:246
DynamicAudioNormalizerContext::dc_correction_value
double * dc_correction_value
Definition: af_dynaudnorm.c:64
float.h
max
#define max(a, b)
Definition: cuda_runtime.h:33
AVFilter::name
const char * name
Filter name.
Definition: avfilter.h:148
c1
static const uint64_t c1
Definition: murmur3.c:49
avfilter_af_dynaudnorm_inputs
static const AVFilterPad avfilter_af_dynaudnorm_inputs[]
Definition: af_dynaudnorm.c:780
FF_FILTER_FORWARD_STATUS_BACK
#define FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink)
Forward the status on an output link to an input link.
Definition: filters.h:199
DynamicAudioNormalizerContext::delay
int delay
Definition: af_dynaudnorm.c:70
sample_rate
sample_rate
Definition: ffmpeg_filter.c:191
find_peak_magnitude
static double find_peak_magnitude(AVFrame *frame, int channel)
Definition: af_dynaudnorm.c:361
ff_bufqueue_get
static AVFrame * ff_bufqueue_get(struct FFBufQueue *queue)
Get the first buffer from the queue and remove it.
Definition: bufferqueue.h:98
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
AVFilterFormats
A list of supported formats for one end of a filter link.
Definition: formats.h:64
get_max_local_gain
static double get_max_local_gain(DynamicAudioNormalizerContext *s, AVFrame *frame, int channel)
Definition: af_dynaudnorm.c:410
cqueue::first
int first
Definition: af_dynaudnorm.c:44
DynamicAudioNormalizerContext
Definition: af_dynaudnorm.c:47
cqueue::size
int size
Definition: af_dynaudnorm.c:42
pts
static int64_t pts
Definition: transcode_aac.c:647
activate
static int activate(AVFilterContext *ctx)
Definition: af_dynaudnorm.c:734
update_value
static double update_value(double new, double old, double aggressiveness)
Definition: af_dynaudnorm.c:493
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:54
avfilter_af_dynaudnorm_outputs
static const AVFilterPad avfilter_af_dynaudnorm_outputs[]
Definition: af_dynaudnorm.c:789
DynamicAudioNormalizerContext::is_enabled
cqueue * is_enabled
Definition: af_dynaudnorm.c:78
DynamicAudioNormalizerContext::queue
struct FFBufQueue queue
Definition: af_dynaudnorm.c:50
avassert.h
lrint
#define lrint
Definition: tablegen.h:53
cqueue::nb_elements
int nb_elements
Definition: af_dynaudnorm.c:43
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
av_cold
#define av_cold
Definition: attributes.h:84
ff_set_common_formats
int ff_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
A helper for query_formats() which sets all links to the same list of formats.
Definition: formats.c:568
frame_size
static int frame_size(int sample_rate, int frame_len_msec)
Definition: af_dynaudnorm.c:141
minimum_filter
static double minimum_filter(cqueue *q)
Definition: af_dynaudnorm.c:418
ff_outlink_set_status
static void ff_outlink_set_status(AVFilterLink *link, int status, int64_t pts)
Set the status field of a link from the source filter.
Definition: filters.h:189
s
#define s(width, name)
Definition: cbs_vp9.c:257
cqueue_empty
static int cqueue_empty(cqueue *q)
Definition: af_dynaudnorm.c:191
AV_OPT_TYPE_DOUBLE
@ AV_OPT_TYPE_DOUBLE
Definition: opt.h:225
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
normalize.adjust
adjust
Definition: normalize.py:25
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
outputs
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
filters.h
DynamicAudioNormalizerContext::channels
int channels
Definition: af_dynaudnorm.c:69
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
ctx
AVFormatContext * ctx
Definition: movenc.c:48
copysign
static av_always_inline double copysign(double x, double y)
Definition: libm.h:68
cqueue_size
static int cqueue_size(cqueue *q)
Definition: af_dynaudnorm.c:186
DynamicAudioNormalizerContext::weights
double * weights
Definition: af_dynaudnorm.c:67
uninit
static av_cold void uninit(AVFilterContext *ctx)
Definition: af_dynaudnorm.c:263
FLAGS
#define FLAGS
Definition: af_dynaudnorm.c:82
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
result
and forward the result(frame or status change) to the corresponding input. If nothing is possible
DynamicAudioNormalizerContext::peak_value
double peak_value
Definition: af_dynaudnorm.c:59
ff_inlink_consume_samples
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
Definition: avfilter.c:1500
NULL
#define NULL
Definition: coverity.c:32
pow_2
static double pow_2(const double value)
Definition: af_dynaudnorm.c:350
perform_dc_correction
static void perform_dc_correction(DynamicAudioNormalizerContext *s, AVFrame *frame)
Definition: af_dynaudnorm.c:499
ff_bufqueue_discard_all
static void ff_bufqueue_discard_all(struct FFBufQueue *queue)
Unref and remove all buffers from the queue.
Definition: bufferqueue.h:111
flush
static int flush(AVFilterLink *outlink)
Definition: af_dynaudnorm.c:715
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
DynamicAudioNormalizerContext::frame_len_msec
int frame_len_msec
Definition: af_dynaudnorm.c:53
exp
int8_t exp
Definition: eval.c:72
ff_inlink_acknowledge_status
int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
Test and acknowledge the change of status on the link.
Definition: avfilter.c:1436
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
cqueue_enqueue
static int cqueue_enqueue(cqueue *q, double element)
Definition: af_dynaudnorm.c:196
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
bufferqueue.h
ff_af_dynaudnorm
AVFilter ff_af_dynaudnorm
Definition: af_dynaudnorm.c:797
compute_frame_std_dev
static double compute_frame_std_dev(DynamicAudioNormalizerContext *s, AVFrame *frame, int channel)
Definition: af_dynaudnorm.c:544
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
DynamicAudioNormalizerContext::target_rms
double target_rms
Definition: af_dynaudnorm.c:61
DynamicAudioNormalizerContext::prev_amplification_factor
double * prev_amplification_factor
Definition: af_dynaudnorm.c:63
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
size
int size
Definition: twinvq_data.h:11134
amplify_frame
static void amplify_frame(DynamicAudioNormalizerContext *s, AVFrame *frame, int enabled)
Definition: af_dynaudnorm.c:640
cqueue_pop
static int cqueue_pop(cqueue *q)
Definition: af_dynaudnorm.c:226
val
const char const char void * val
Definition: avisynth_c.h:863
cqueue_free
static void cqueue_free(cqueue *q)
Definition: af_dynaudnorm.c:179
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
Definition: af_dynaudnorm.c:665
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
FF_FILTER_FORWARD_WANTED
FF_FILTER_FORWARD_WANTED(outlink, inlink)
ff_bufqueue_add
static void ff_bufqueue_add(void *log, struct FFBufQueue *queue, AVFrame *buf)
Add a buffer to the queue.
Definition: bufferqueue.h:71
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
query_formats
static int query_formats(AVFilterContext *ctx)
Definition: af_dynaudnorm.c:111
M_PI
#define M_PI
Definition: mathematics.h:52
internal.h
cqueue_create
static cqueue * cqueue_create(int size)
Definition: af_dynaudnorm.c:158
DynamicAudioNormalizerContext::compress_threshold
double * compress_threshold
Definition: af_dynaudnorm.c:65
in
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
Definition: audio_convert.c:326
DynamicAudioNormalizerContext::filter_size
int filter_size
Definition: af_dynaudnorm.c:54
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
DynamicAudioNormalizerContext::pts
int64_t pts
Definition: af_dynaudnorm.c:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
FFBufQueue
Structure holding the queue.
Definition: bufferqueue.h:49
DynamicAudioNormalizerContext::dc_correction
int dc_correction
Definition: af_dynaudnorm.c:55
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
DynamicAudioNormalizerContext::gain_history_minimum
cqueue ** gain_history_minimum
Definition: af_dynaudnorm.c:75
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
value
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default value
Definition: writing_filters.txt:86
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:60
ff_inlink_queued_samples
int ff_inlink_queued_samples(AVFilterLink *link)
Definition: avfilter.c:1461
erf
static double erf(double z)
erf function Algorithm taken from the Boost project, source: http://www.boost.org/doc/libs/1_46_1/boo...
Definition: libm.h:121
AVFilter
Filter definition.
Definition: avfilter.h:144
bound
static double bound(const double threshold, const double val)
Definition: af_dynaudnorm.c:355
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
cqueue_peek
static double cqueue_peek(cqueue *q, int index)
Definition: af_dynaudnorm.c:209
cqueue::elements
double * elements
Definition: af_dynaudnorm.c:41
compute_frame_rms
static double compute_frame_rms(AVFrame *frame, int channel)
Definition: af_dynaudnorm.c:383
cqueue
Definition: af_dynaudnorm.c:40
init_gaussian_filter
static void init_gaussian_filter(DynamicAudioNormalizerContext *s)
Definition: af_dynaudnorm.c:236
ff_all_samplerates
AVFilterFormats * ff_all_samplerates(void)
Definition: formats.c:395
c2
static const uint64_t c2
Definition: murmur3.c:50
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
Definition: mem.c:244
dynaudnorm_options
static const AVOption dynaudnorm_options[]
Definition: af_dynaudnorm.c:84
DynamicAudioNormalizerContext::gain_history_original
cqueue ** gain_history_original
Definition: af_dynaudnorm.c:74
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:223
avfilter.h
DynamicAudioNormalizerContext::alt_boundary_mode
int alt_boundary_mode
Definition: af_dynaudnorm.c:57
DynamicAudioNormalizerContext::compress_factor
double compress_factor
Definition: af_dynaudnorm.c:62
AV_SAMPLE_FMT_DBLP
@ AV_SAMPLE_FMT_DBLP
double, planar
Definition: samplefmt.h:70
CONST
#define CONST(name, help, val, unit)
Definition: vf_bwdif.c:373
precalculate_fade_factors
static void precalculate_fade_factors(double *fade_factors[2], int frame_len)
Definition: af_dynaudnorm.c:147
DynamicAudioNormalizerContext::frame_len
int frame_len
Definition: af_dynaudnorm.c:52
AVFilterContext
An instance of a filter.
Definition: avfilter.h:338
DynamicAudioNormalizerContext::fade_factors
double * fade_factors[2]
Definition: af_dynaudnorm.c:66
DynamicAudioNormalizerContext::eof
int eof
Definition: af_dynaudnorm.c:71
audio.h
llrint
#define llrint(x)
Definition: libm.h:394
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:136
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:240
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
DynamicAudioNormalizerContext::channels_coupled
int channels_coupled
Definition: af_dynaudnorm.c:56
AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL
Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will have its filter_frame() c...
Definition: avfilter.h:133
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:565
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
perform_compression
static void perform_compression(DynamicAudioNormalizerContext *s, AVFrame *frame)
Definition: af_dynaudnorm.c:571
ff_set_common_samplerates
int ff_set_common_samplerates(AVFilterContext *ctx, AVFilterFormats *samplerates)
Definition: formats.c:556
gaussian_filter
static double gaussian_filter(DynamicAudioNormalizerContext *s, cqueue *q)
Definition: af_dynaudnorm.c:430
setup_compress_thresh
static double setup_compress_thresh(double threshold)
Definition: af_dynaudnorm.c:522
DynamicAudioNormalizerContext::max_amplification
double max_amplification
Definition: af_dynaudnorm.c:60
DynamicAudioNormalizerContext::gain_history_smoothed
cqueue ** gain_history_smoothed
Definition: af_dynaudnorm.c:76
cqueue_dequeue
static int cqueue_dequeue(cqueue *q, double *element)
Definition: af_dynaudnorm.c:215
channel
channel
Definition: ebur128.h:39
ff_filter_set_ready
void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
Mark a filter ready and schedule it for activation.
Definition: avfilter.c:193
min
float min
Definition: vorbis_enc_data.h:456