FFmpeg
filter_audio.c
/*
 * copyright (c) 2013 Andrew Kelley
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * libavfilter API usage example.
 *
 * @example filter_audio.c
 * This example generates sine wave audio, passes it through a simple filter
 * chain, and then computes the MD5 checksum of the output data.
 *
 * The filter chain it uses is:
 * (input) -> abuffer -> volume -> aformat -> abuffersink -> (output)
 *
 * abuffer: This provides the endpoint where you can feed the decoded samples.
 * volume: In this example we hardcode it to 0.90.
 * aformat: This converts the samples to the sample rate, channel layout,
 *          and sample format required by the audio device.
 * abuffersink: This provides the endpoint where you can read the samples after
 *              they have passed through the filter chain.
 */
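
/*
 * For reference only (this example builds the graph programmatically instead):
 * the volume and aformat stages above correspond roughly to the filtergraph
 * description string
 *   "volume=0.90,aformat=sample_fmts=s16:sample_rates=44100:channel_layouts=stereo"
 * which could be handed to avfilter_graph_parse_ptr(); see the sketch after
 * init_filter_graph() below.
 */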

#include <inttypes.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#include "libavutil/channel_layout.h"
#include "libavutil/md5.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"

#define INPUT_SAMPLERATE     48000
#define INPUT_FORMAT         AV_SAMPLE_FMT_FLTP
#define INPUT_CHANNEL_LAYOUT (AVChannelLayout)AV_CHANNEL_LAYOUT_5POINT0

#define VOLUME_VAL 0.90

static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
                             AVFilterContext **sink)
{
    AVFilterGraph *filter_graph;
    AVFilterContext *abuffer_ctx;
    const AVFilter  *abuffer;
    AVFilterContext *volume_ctx;
    const AVFilter  *volume;
    AVFilterContext *aformat_ctx;
    const AVFilter  *aformat;
    AVFilterContext *abuffersink_ctx;
    const AVFilter  *abuffersink;

    AVDictionary *options_dict = NULL;
    uint8_t options_str[1024];
    uint8_t ch_layout[64];

    int err;

    /* Create a new filtergraph, which will contain all the filters. */
    filter_graph = avfilter_graph_alloc();
    if (!filter_graph) {
        fprintf(stderr, "Unable to create filter graph.\n");
        return AVERROR(ENOMEM);
    }

    /* Create the abuffer filter;
     * it will be used for feeding the data into the graph. */
    abuffer = avfilter_get_by_name("abuffer");
    if (!abuffer) {
        fprintf(stderr, "Could not find the abuffer filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    abuffer_ctx = avfilter_graph_alloc_filter(filter_graph, abuffer, "src");
    if (!abuffer_ctx) {
        fprintf(stderr, "Could not allocate the abuffer instance.\n");
        return AVERROR(ENOMEM);
    }

    /* Set the filter options through the AVOptions API. */
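    /* The AV_OPT_SEARCH_CHILDREN flag below asks av_opt_set() to also search
     * the option contexts of the object's children, which is where a filter's
     * private options (such as abuffer's "channel_layout") live. */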
    av_channel_layout_describe(&INPUT_CHANNEL_LAYOUT, ch_layout, sizeof(ch_layout));
    av_opt_set    (abuffer_ctx, "channel_layout", ch_layout,                            AV_OPT_SEARCH_CHILDREN);
    av_opt_set    (abuffer_ctx, "sample_fmt",     av_get_sample_fmt_name(INPUT_FORMAT), AV_OPT_SEARCH_CHILDREN);
    av_opt_set_q  (abuffer_ctx, "time_base",      (AVRational){ 1, INPUT_SAMPLERATE },  AV_OPT_SEARCH_CHILDREN);
    av_opt_set_int(abuffer_ctx, "sample_rate",    INPUT_SAMPLERATE,                     AV_OPT_SEARCH_CHILDREN);

    /* Now initialize the filter; we pass NULL options, since we have already
     * set all the options above. */
    err = avfilter_init_str(abuffer_ctx, NULL);
    if (err < 0) {
        fprintf(stderr, "Could not initialize the abuffer filter.\n");
        return err;
    }

    /* Create volume filter. */
    volume = avfilter_get_by_name("volume");
    if (!volume) {
        fprintf(stderr, "Could not find the volume filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    volume_ctx = avfilter_graph_alloc_filter(filter_graph, volume, "volume");
    if (!volume_ctx) {
        fprintf(stderr, "Could not allocate the volume instance.\n");
        return AVERROR(ENOMEM);
    }

    /* A different way of passing the options is as key/value pairs in a
     * dictionary. */
    av_dict_set(&options_dict, "volume", AV_STRINGIFY(VOLUME_VAL), 0);
    err = avfilter_init_dict(volume_ctx, &options_dict);
    av_dict_free(&options_dict);
    if (err < 0) {
        fprintf(stderr, "Could not initialize the volume filter.\n");
        return err;
    }

    /* Create the aformat filter;
     * it ensures that the output is of the format we want. */
    aformat = avfilter_get_by_name("aformat");
    if (!aformat) {
        fprintf(stderr, "Could not find the aformat filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    aformat_ctx = avfilter_graph_alloc_filter(filter_graph, aformat, "aformat");
    if (!aformat_ctx) {
        fprintf(stderr, "Could not allocate the aformat instance.\n");
        return AVERROR(ENOMEM);
    }

    /* A third way of passing the options is in a string of the form
     * key1=value1:key2=value2.... */
    snprintf(options_str, sizeof(options_str),
             "sample_fmts=%s:sample_rates=%d:channel_layouts=stereo",
             av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100);
    err = avfilter_init_str(aformat_ctx, options_str);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Could not initialize the aformat filter.\n");
        return err;
    }

    /* Finally create the abuffersink filter;
     * it will be used to get the filtered data out of the graph. */
    abuffersink = avfilter_get_by_name("abuffersink");
    if (!abuffersink) {
        fprintf(stderr, "Could not find the abuffersink filter.\n");
        return AVERROR_FILTER_NOT_FOUND;
    }

    abuffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "sink");
    if (!abuffersink_ctx) {
        fprintf(stderr, "Could not allocate the abuffersink instance.\n");
        return AVERROR(ENOMEM);
    }

    /* This filter takes no options. */
    err = avfilter_init_str(abuffersink_ctx, NULL);
    if (err < 0) {
        fprintf(stderr, "Could not initialize the abuffersink instance.\n");
        return err;
    }

    /* Connect the filters;
     * in this simple case the filters just form a linear chain. */
    err = avfilter_link(abuffer_ctx, 0, volume_ctx, 0);
    if (err >= 0)
        err = avfilter_link(volume_ctx, 0, aformat_ctx, 0);
    if (err >= 0)
        err = avfilter_link(aformat_ctx, 0, abuffersink_ctx, 0);
    if (err < 0) {
        fprintf(stderr, "Error connecting filters\n");
        return err;
    }

    /* Configure the graph. */
    err = avfilter_graph_config(filter_graph, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error configuring the filter graph\n");
        return err;
    }

    *graph = filter_graph;
    *src   = abuffer_ctx;
    *sink  = abuffersink_ctx;

    return 0;
}
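
/*
 * Not part of the original example: a minimal sketch of how the middle of the
 * same chain (volume -> aformat) could instead be described as a string and
 * parsed with avfilter_graph_parse_ptr(). The abuffer and abuffersink
 * instances are still created and configured by the caller, as in
 * init_filter_graph() above; this helper is illustrative only and is not
 * called anywhere in this program.
 */
static int init_filters_from_string(AVFilterGraph *filter_graph,
                                    AVFilterContext *abuffer_ctx,
                                    AVFilterContext *abuffersink_ctx)
{
    static const char *filters_descr =
        "volume=0.90,aformat=sample_fmts=s16:sample_rates=44100:channel_layouts=stereo";
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    int err;

    if (!outputs || !inputs) {
        err = AVERROR(ENOMEM);
        goto end;
    }

    /* The parsed chain's unlabeled input is fed from our abuffer instance... */
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = abuffer_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;

    /* ...and its unlabeled output is read through abuffersink. */
    inputs->name       = av_strdup("out");
    inputs->filter_ctx = abuffersink_ctx;
    inputs->pad_idx    = 0;
    inputs->next       = NULL;

    err = avfilter_graph_parse_ptr(filter_graph, filters_descr,
                                   &inputs, &outputs, NULL);
    if (err >= 0)
        err = avfilter_graph_config(filter_graph, NULL);

end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    return err;
}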

/* Do something useful with the filtered data: this simple
 * example just prints the MD5 checksum of each plane to stdout. */
static int process_output(struct AVMD5 *md5, AVFrame *frame)
{
    int planar     = av_sample_fmt_is_planar(frame->format);
    int channels   = frame->ch_layout.nb_channels;
    int planes     = planar ? channels : 1;
    int bps        = av_get_bytes_per_sample(frame->format);
    int plane_size = bps * frame->nb_samples * (planar ? 1 : channels);
    int i, j;

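    /* For planar sample formats each channel has its own data plane in
     * extended_data[]; for packed (interleaved) formats all channels share
     * extended_data[0], so there is a single plane whose size covers every
     * channel. */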
    for (i = 0; i < planes; i++) {
        uint8_t checksum[16];

        av_md5_init(md5);
        av_md5_sum(checksum, frame->extended_data[i], plane_size);

        fprintf(stdout, "plane %d: 0x", i);
        for (j = 0; j < sizeof(checksum); j++)
            fprintf(stdout, "%02X", checksum[j]);
        fprintf(stdout, "\n");
    }
    fprintf(stdout, "\n");

    return 0;
}

/* Construct a frame of audio data to be filtered;
 * this simple example just synthesizes a sine wave. */
static int get_input(AVFrame *frame, int frame_num)
{
    int err, i, j;

#define FRAME_SIZE 1024

    /* Set up the frame properties and allocate the buffer for the data. */
    frame->sample_rate = INPUT_SAMPLERATE;
    frame->format      = INPUT_FORMAT;
    av_channel_layout_copy(&frame->ch_layout, &INPUT_CHANNEL_LAYOUT);
    frame->nb_samples  = FRAME_SIZE;
    frame->pts         = frame_num * FRAME_SIZE;

    err = av_frame_get_buffer(frame, 0);
    if (err < 0)
        return err;

    /* Fill the data for each channel. */
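    /* Each of the five channels gets its own tone: channel i completes
     * (i + 1) cycles per FRAME_SIZE samples, i.e. roughly
     * (i + 1) * INPUT_SAMPLERATE / FRAME_SIZE ~= (i + 1) * 46.9 Hz. */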
    for (i = 0; i < 5; i++) {
        float *data = (float*)frame->extended_data[i];

        for (j = 0; j < frame->nb_samples; j++)
            data[j] = sin(2 * M_PI * (frame_num + j) * (i + 1) / FRAME_SIZE);
    }

    return 0;
}

int main(int argc, char *argv[])
{
    struct AVMD5 *md5;
    AVFilterGraph *graph;
    AVFilterContext *src, *sink;
    AVFrame *frame;
    uint8_t errstr[1024];
    float duration;
    int err, nb_frames, i;

    if (argc < 2) {
        fprintf(stderr, "Usage: %s <duration>\n", argv[0]);
        return 1;
    }

    duration  = atof(argv[1]);
    nb_frames = duration * INPUT_SAMPLERATE / FRAME_SIZE;
    if (nb_frames <= 0) {
        fprintf(stderr, "Invalid duration: %s\n", argv[1]);
        return 1;
    }

    /* Allocate the frame we will be using to store the data. */
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Error allocating the frame\n");
        return 1;
    }

    md5 = av_md5_alloc();
    if (!md5) {
        fprintf(stderr, "Error allocating the MD5 context\n");
        return 1;
    }

    /* Set up the filtergraph. */
    err = init_filter_graph(&graph, &src, &sink);
    if (err < 0) {
        fprintf(stderr, "Unable to init filter graph:");
        goto fail;
    }

    /* the main filtering loop */
    for (i = 0; i < nb_frames; i++) {
        /* get an input frame to be filtered */
        err = get_input(frame, i);
        if (err < 0) {
            fprintf(stderr, "Error generating input frame:");
            goto fail;
        }

        /* Send the frame to the input of the filtergraph. */
        err = av_buffersrc_add_frame(src, frame);
        if (err < 0) {
            av_frame_unref(frame);
            fprintf(stderr, "Error submitting the frame to the filtergraph:");
            goto fail;
        }

        /* Get all the filtered output that is available. */
        while ((err = av_buffersink_get_frame(sink, frame)) >= 0) {
            /* now do something with our filtered frame */
            err = process_output(md5, frame);
            if (err < 0) {
                fprintf(stderr, "Error processing the filtered frame:");
                goto fail;
            }
            av_frame_unref(frame);
        }

        if (err == AVERROR(EAGAIN)) {
            /* Need to feed more frames in. */
            continue;
        } else if (err == AVERROR_EOF) {
            /* Nothing more to do, finish. */
            break;
        } else if (err < 0) {
            /* An error occurred. */
            fprintf(stderr, "Error filtering the data:");
            goto fail;
        }
    }

    avfilter_graph_free(&graph);
    av_frame_free(&frame);
    av_freep(&md5);

    return 0;

fail:
    av_strerror(err, errstr, sizeof(errstr));
    fprintf(stderr, "%s\n", errstr);
    return 1;
}
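
/*
 * Usage note (a sketch, not part of the original example): one way to build
 * and run this program, assuming the FFmpeg development libraries are
 * installed and visible to pkg-config, is
 *
 *   cc filter_audio.c -o filter_audio \
 *      $(pkg-config --cflags --libs libavfilter libavutil) -lm
 *   ./filter_audio 1.0
 *
 * which filters one second of generated audio and prints the MD5 checksum of
 * each filtered frame.
 */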