trim.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <float.h>
#include <math.h>
#include <stdint.h>

#include "config.h"

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

#include "audio.h"
#include "avfilter.h"
#include "internal.h"

typedef struct TrimContext {
    const AVClass *class;

    /*
     * AVOptions
     */
    int64_t duration;
    int64_t start_time, end_time;
    int64_t start_frame, end_frame;

    double duration_dbl;
    double start_time_dbl, end_time_dbl;
    /*
     * in the link timebase for video,
     * in 1/samplerate for audio
     */
    int64_t start_pts, end_pts;
    int64_t start_sample, end_sample;

    /*
     * number of video frames that arrived on this filter so far
     */
    int64_t nb_frames;
    /*
     * number of audio samples that arrived on this filter so far
     */
    int64_t nb_samples;
    /*
     * timestamp of the first frame in the output, in the timebase units
     */
    int64_t first_pts;
    /*
     * duration in the timebase units
     */
    int64_t duration_tb;

    int64_t next_pts;

    int eof;
} TrimContext;

static av_cold int init(AVFilterContext *ctx)
{
    TrimContext *s = ctx->priv;

    s->first_pts = AV_NOPTS_VALUE;

    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext *s = ctx->priv;
    AVRational tb = (inlink->type == AVMEDIA_TYPE_VIDEO) ?
                     inlink->time_base : (AVRational){ 1, inlink->sample_rate };

    /* convert the compat options given in seconds to AV_TIME_BASE units */
    if (s->start_time_dbl != DBL_MAX)
        s->start_time = s->start_time_dbl * 1e6;
    if (s->end_time_dbl != DBL_MAX)
        s->end_time = s->end_time_dbl * 1e6;
    if (s->duration_dbl != 0)
        s->duration = s->duration_dbl * 1e6;

    if (s->start_time != INT64_MAX) {
        int64_t start_pts = av_rescale_q(s->start_time, AV_TIME_BASE_Q, tb);
        if (s->start_pts == AV_NOPTS_VALUE || start_pts < s->start_pts)
            s->start_pts = start_pts;
    }
    if (s->end_time != INT64_MAX) {
        int64_t end_pts = av_rescale_q(s->end_time, AV_TIME_BASE_Q, tb);
        if (s->end_pts == AV_NOPTS_VALUE || end_pts > s->end_pts)
            s->end_pts = end_pts;
    }
    if (s->duration)
        s->duration_tb = av_rescale_q(s->duration, AV_TIME_BASE_Q, tb);

    return 0;
}
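
/*
 * Worked example of the conversion above (illustrative note, not from the
 * original file): for 48 kHz audio, "atrim=start=1.5" sets start_time_dbl to
 * 1.5, so start_time becomes 1500000 (AV_TIME_BASE units, i.e. microseconds)
 * and start_pts = av_rescale_q(1500000, AV_TIME_BASE_Q, (AVRational){ 1, 48000 })
 * = 72000, the index of the first sample that will be kept.
 */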

static int config_output(AVFilterLink *outlink)
{
    outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
    return 0;
}

#define OFFSET(x) offsetof(TrimContext, x)
#define COMMON_OPTS \
    { "starti",    "Timestamp of the first frame that " \
        "should be passed",        OFFSET(start_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX },      INT64_MIN, INT64_MAX, FLAGS }, \
    { "endi",      "Timestamp of the first frame that " \
        "should be dropped again", OFFSET(end_time),   AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX },      INT64_MIN, INT64_MAX, FLAGS }, \
    { "start_pts", "Timestamp of the first frame that should be " \
        "passed",                  OFFSET(start_pts),  AV_OPT_TYPE_INT64,    { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "end_pts",   "Timestamp of the first frame that should be " \
        "dropped again",           OFFSET(end_pts),    AV_OPT_TYPE_INT64,    { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "durationi", "Maximum duration of the output", OFFSET(duration), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT64_MAX, FLAGS },

#define COMPAT_OPTS \
    { "start",    "Timestamp in seconds of the first frame that " \
        "should be passed",        OFFSET(start_time_dbl), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS }, \
    { "end",      "Timestamp in seconds of the first frame that " \
        "should be dropped again", OFFSET(end_time_dbl),   AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS }, \
    { "duration", "Maximum duration of the output in seconds", OFFSET(duration_dbl), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, DBL_MAX, FLAGS },


#if CONFIG_TRIM_FILTER
static int trim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext *s = ctx->priv;
    int drop;

    /* drop everything if EOF has already been returned */
    if (s->eof) {
        av_frame_free(&frame);
        return 0;
    }

    /* check whether the frame is past the configured start point */
    if (s->start_frame >= 0 || s->start_pts != AV_NOPTS_VALUE) {
        drop = 1;
        if (s->start_frame >= 0 && s->nb_frames >= s->start_frame)
            drop = 0;
        if (s->start_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
            frame->pts >= s->start_pts)
            drop = 0;
        if (drop)
            goto drop;
    }

    if (s->first_pts == AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE)
        s->first_pts = frame->pts;

    /* check whether the frame is still before the configured end point */
    if (s->end_frame != INT64_MAX || s->end_pts != AV_NOPTS_VALUE || s->duration_tb) {
        drop = 1;

        if (s->end_frame != INT64_MAX && s->nb_frames < s->end_frame)
            drop = 0;
        if (s->end_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
            frame->pts < s->end_pts)
            drop = 0;
        if (s->duration_tb && frame->pts != AV_NOPTS_VALUE &&
            frame->pts - s->first_pts < s->duration_tb)
            drop = 0;

        if (drop) {
            s->eof = inlink->closed = 1;
            goto drop;
        }
    }

    s->nb_frames++;

    return ff_filter_frame(ctx->outputs[0], frame);

drop:
    s->nb_frames++;
    av_frame_free(&frame);
    return 0;
}
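
/*
 * Illustrative note (not from the original file): nb_frames counts every
 * input frame, dropped or passed, so start_frame/end_frame are indices into
 * the input stream. E.g. "trim=start_frame=100:end_frame=150" passes input
 * frames 100..149; when frame 150 arrives, eof and inlink->closed are set so
 * upstream filters stop producing further frames.
 */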

#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption trim_options[] = {
    COMMON_OPTS
    { "start_frame", "Number of the first frame that should be passed "
        "to the output", OFFSET(start_frame), AV_OPT_TYPE_INT64, { .i64 = -1 },        -1, INT64_MAX, FLAGS },
    { "end_frame",   "Number of the first frame that should be dropped "
        "again",         OFFSET(end_frame),   AV_OPT_TYPE_INT64, { .i64 = INT64_MAX },  0, INT64_MAX, FLAGS },
    COMPAT_OPTS
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(trim);

static const AVFilterPad trim_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = trim_filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad trim_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_trim = {
    .name        = "trim",
    .description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
    .init        = init,
    .priv_size   = sizeof(TrimContext),
    .priv_class  = &trim_class,
    .inputs      = trim_inputs,
    .outputs     = trim_outputs,
};
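
/*
 * Example usage (illustrative note, not from the original file): trim only
 * drops frames, it does not shift the remaining timestamps towards zero, so
 * it is usually followed by setpts, e.g.
 *
 *     ffmpeg -i in.mp4 -vf "trim=start=2:end=7,setpts=PTS-STARTPTS" out.mp4
 *
 * keeps the section between 2 and 7 seconds and rebases its timestamps.
 */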
#endif // CONFIG_TRIM_FILTER

#if CONFIG_ATRIM_FILTER
static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext *s = ctx->priv;
    int64_t start_sample, end_sample;
    int64_t pts;
    int drop;

    /* drop everything if EOF has already been returned */
    if (s->eof) {
        av_frame_free(&frame);
        return 0;
    }

    /* get the frame timestamp in 1/samplerate units */
    if (frame->pts != AV_NOPTS_VALUE)
        pts = av_rescale_q(frame->pts, inlink->time_base,
                           (AVRational){ 1, inlink->sample_rate });
    else
        pts = s->next_pts;
    s->next_pts = pts + frame->nb_samples;

    /* check if at least a part of the frame is after the start time */
    if (s->start_sample < 0 && s->start_pts == AV_NOPTS_VALUE) {
        start_sample = 0;
    } else {
        drop = 1;
        start_sample = frame->nb_samples;

        if (s->start_sample >= 0 &&
            s->nb_samples + frame->nb_samples > s->start_sample) {
            drop = 0;
            start_sample = FFMIN(start_sample, s->start_sample - s->nb_samples);
        }

        if (s->start_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
            pts + frame->nb_samples > s->start_pts) {
            drop = 0;
            start_sample = FFMIN(start_sample, s->start_pts - pts);
        }

        if (drop)
            goto drop;
    }

    if (s->first_pts == AV_NOPTS_VALUE)
        s->first_pts = pts + start_sample;

    /* check if at least a part of the frame is before the end time */
    if (s->end_sample == INT64_MAX && s->end_pts == AV_NOPTS_VALUE && !s->duration_tb) {
        end_sample = frame->nb_samples;
    } else {
        drop = 1;
        end_sample = 0;

        if (s->end_sample != INT64_MAX &&
            s->nb_samples < s->end_sample) {
            drop = 0;
            end_sample = FFMAX(end_sample, s->end_sample - s->nb_samples);
        }

        if (s->end_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
            pts < s->end_pts) {
            drop = 0;
            end_sample = FFMAX(end_sample, s->end_pts - pts);
        }

        if (s->duration_tb && pts - s->first_pts < s->duration_tb) {
            drop = 0;
            end_sample = FFMAX(end_sample, s->first_pts + s->duration_tb - pts);
        }

        if (drop) {
            s->eof = inlink->closed = 1;
            goto drop;
        }
    }

    s->nb_samples += frame->nb_samples;
    start_sample = FFMAX(0, start_sample);
    end_sample   = FFMIN(frame->nb_samples, end_sample);
    av_assert0(start_sample < end_sample || (start_sample == end_sample && !frame->nb_samples));

    /* copy out only the [start_sample, end_sample) part of a partially
     * trimmed frame */
    if (start_sample) {
        AVFrame *out = ff_get_audio_buffer(ctx->outputs[0], end_sample - start_sample);
        if (!out) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, frame);
        av_samples_copy(out->extended_data, frame->extended_data, 0, start_sample,
                        out->nb_samples, inlink->channels,
                        frame->format);
        if (out->pts != AV_NOPTS_VALUE)
            out->pts += av_rescale_q(start_sample, (AVRational){ 1, out->sample_rate },
                                     inlink->time_base);

        av_frame_free(&frame);
        frame = out;
    } else
        frame->nb_samples = end_sample;

    return ff_filter_frame(ctx->outputs[0], frame);

drop:
    s->nb_samples += frame->nb_samples;
    av_frame_free(&frame);
    return 0;
}
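
/*
 * Worked example of the partial-frame math above (illustrative note, not from
 * the original file): with start_sample=1000 and a 1024-sample frame arriving
 * when nb_samples is already 900, start_sample - nb_samples = 100, so the
 * first 100 samples are cut off, av_samples_copy() moves the remaining 924
 * samples into a new buffer and the output pts is advanced by those 100
 * samples rescaled to the link timebase.
 */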

#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption atrim_options[] = {
    COMMON_OPTS
    { "start_sample", "Number of the first audio sample that should be "
        "passed to the output", OFFSET(start_sample), AV_OPT_TYPE_INT64, { .i64 = -1 },        -1, INT64_MAX, FLAGS },
    { "end_sample",   "Number of the first audio sample that should be "
        "dropped again",        OFFSET(end_sample),   AV_OPT_TYPE_INT64, { .i64 = INT64_MAX },  0, INT64_MAX, FLAGS },
    COMPAT_OPTS
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(atrim);

static const AVFilterPad atrim_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = atrim_filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad atrim_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_af_atrim = {
    .name        = "atrim",
    .description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
    .init        = init,
    .priv_size   = sizeof(TrimContext),
    .priv_class  = &atrim_class,
    .inputs      = atrim_inputs,
    .outputs     = atrim_outputs,
};
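
/*
 * Example usage (illustrative note, not from the original file): as with
 * trim, atrim only discards samples and keeps the original timestamps, so it
 * is typically chained with asetpts, e.g.
 *
 *     ffmpeg -i in.wav -af "atrim=start_sample=44100:end_sample=132300,asetpts=PTS-STARTPTS" out.wav
 *
 * keeps samples 44100..132299 and rebases the output timestamps to zero.
 */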
#endif // CONFIG_ATRIM_FILTER