/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>

#include "config.h"

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

#include "audio.h"
#include "avfilter.h"
#include "internal.h"

typedef struct TrimContext {
    const AVClass *class;

    /*
     * AVOptions
     */
    int64_t duration;
    int64_t start_time, end_time;
    int64_t start_frame, end_frame;
    /*
     * in the link timebase for video,
     * in 1/samplerate for audio
     */
    int64_t start_pts, end_pts;
    int64_t start_sample, end_sample;

    /*
     * number of video frames that arrived on this filter so far
     */
    int64_t nb_frames;
    /*
     * number of audio samples that arrived on this filter so far
     */
    int64_t nb_samples;
    /*
     * timestamp of the first frame in the output, in the timebase units
     */
    int64_t first_pts;
    /*
     * duration in the timebase units
     */
    int64_t duration_tb;

    int64_t next_pts;

    int eof;
} TrimContext;

static av_cold int init(AVFilterContext *ctx)
{
    TrimContext *s = ctx->priv;

    s->first_pts = AV_NOPTS_VALUE;

    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext *s = ctx->priv;
    AVRational tb = (inlink->type == AVMEDIA_TYPE_VIDEO) ?
                     inlink->time_base : (AVRational){ 1, inlink->sample_rate };

    if (s->start_time != INT64_MAX) {
        int64_t start_pts = av_rescale_q(s->start_time, AV_TIME_BASE_Q, tb);
        if (s->start_pts == AV_NOPTS_VALUE || start_pts < s->start_pts)
            s->start_pts = start_pts;
    }
    if (s->end_time != INT64_MAX) {
        int64_t end_pts = av_rescale_q(s->end_time, AV_TIME_BASE_Q, tb);
        if (s->end_pts == AV_NOPTS_VALUE || end_pts > s->end_pts)
            s->end_pts = end_pts;
    }
    if (s->duration)
        s->duration_tb = av_rescale_q(s->duration, AV_TIME_BASE_Q, tb);

    return 0;
}
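
/*
 * Editor's note (worked example, not part of the original source): for an
 * audio input at 44100 Hz, tb is {1, 44100}, so an option value of
 * start=1.5 (start_time = 1500000 in AV_TIME_BASE units) becomes
 *     av_rescale_q(1500000, AV_TIME_BASE_Q, (AVRational){ 1, 44100 }) = 66150,
 * i.e. the cut point expressed in samples. For video the same value is
 * converted into the input link's own time base instead.
 */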

#define OFFSET(x) offsetof(TrimContext, x)
#define COMMON_OPTS                                                                                                                    \
    { "start",     "Timestamp of the first frame that "                                                                                \
        "should be passed",        OFFSET(start_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX },      INT64_MIN, INT64_MAX, FLAGS }, \
    { "starti",    "Timestamp of the first frame that "                                                                                \
        "should be passed",        OFFSET(start_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX },      INT64_MIN, INT64_MAX, FLAGS }, \
    { "end",       "Timestamp of the first frame that "                                                                                \
        "should be dropped again", OFFSET(end_time),   AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX },      INT64_MIN, INT64_MAX, FLAGS }, \
    { "endi",      "Timestamp of the first frame that "                                                                                \
        "should be dropped again", OFFSET(end_time),   AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX },      INT64_MIN, INT64_MAX, FLAGS }, \
    { "start_pts", "Timestamp of the first frame that should be "                                                                      \
        " passed",                 OFFSET(start_pts),  AV_OPT_TYPE_INT64,    { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "end_pts",   "Timestamp of the first frame that should be "                                                                      \
        "dropped again",           OFFSET(end_pts),    AV_OPT_TYPE_INT64,    { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "duration",  "Maximum duration of the output",   OFFSET(duration),     AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT64_MAX, FLAGS }, \
    { "durationi", "Maximum duration of the output",   OFFSET(duration),     AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT64_MAX, FLAGS },
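
/*
 * Editor's note (illustrative, not part of the original source): in a
 * filtergraph string these options are used as, e.g.,
 *     trim=start=2:end=7                  keep video between t=2s and t=7s
 *     trim=start_frame=100:end_frame=200  keep frames 100..199
 *     atrim=start_sample=44100:end_sample=88200
 * "starti", "endi" and "durationi" are alternative names that write to the
 * same fields as "start", "end" and "duration".
 */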

#if CONFIG_TRIM_FILTER
static int trim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext *s = ctx->priv;
    int drop;

    /* drop everything if EOF has already been returned */
    if (s->eof) {
        av_frame_free(&frame);
        return 0;
    }

    if (s->start_frame >= 0 || s->start_pts != AV_NOPTS_VALUE) {
        drop = 1;
        if (s->start_frame >= 0 && s->nb_frames >= s->start_frame)
            drop = 0;
        if (s->start_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
            frame->pts >= s->start_pts)
            drop = 0;
        if (drop)
            goto drop;
    }

    if (s->first_pts == AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE)
        s->first_pts = frame->pts;

    if (s->end_frame != INT64_MAX || s->end_pts != AV_NOPTS_VALUE || s->duration_tb) {
        drop = 1;

        if (s->end_frame != INT64_MAX && s->nb_frames < s->end_frame)
            drop = 0;
        if (s->end_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
            frame->pts < s->end_pts)
            drop = 0;
        if (s->duration_tb && frame->pts != AV_NOPTS_VALUE &&
            frame->pts - s->first_pts < s->duration_tb)
            drop = 0;

        if (drop) {
            s->eof = 1;
            ff_avfilter_link_set_out_status(inlink, AVERROR_EOF, AV_NOPTS_VALUE);
            goto drop;
        }
    }

    s->nb_frames++;

    return ff_filter_frame(ctx->outputs[0], frame);

drop:
    s->nb_frames++;
    av_frame_free(&frame);
    return 0;
}
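
/*
 * Editor's note (illustrative, not part of the original source): with
 * start_frame=100 and end_frame=200, frames 0..99 are dropped, frames
 * 100..199 are passed through, and on frame 200 none of the end conditions
 * hold, so s->eof is set, EOF is signalled on the input link via
 * ff_avfilter_link_set_out_status(), and anything that still arrives is
 * freed immediately by the s->eof check at the top of the function.
 */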

#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption trim_options[] = {
    COMMON_OPTS
    { "start_frame", "Number of the first frame that should be passed "
        "to the output", OFFSET(start_frame), AV_OPT_TYPE_INT64, { .i64 = -1 },        -1, INT64_MAX, FLAGS },
    { "end_frame",   "Number of the first frame that should be dropped "
        "again",         OFFSET(end_frame),   AV_OPT_TYPE_INT64, { .i64 = INT64_MAX },  0, INT64_MAX, FLAGS },
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(trim);

static const AVFilterPad trim_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = trim_filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad trim_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_trim = {
    .name        = "trim",
    .description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
    .init        = init,
    .priv_size   = sizeof(TrimContext),
    .priv_class  = &trim_class,
    .inputs      = trim_inputs,
    .outputs     = trim_outputs,
};
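
/*
 * Editor's sketch (not part of the original source, names of the graph and
 * context variables are assumptions): a minimal programmatic use of this
 * filter in an already allocated AVFilterGraph "graph" could look like
 *
 *     const AVFilter *trim = avfilter_get_by_name("trim");
 *     AVFilterContext *trim_ctx = NULL;
 *     int ret = avfilter_graph_create_filter(&trim_ctx, trim, "trim",
 *                                            "start=2:end=7", NULL, graph);
 *
 * with trim_ctx then linked between a buffer source and a buffer sink.
 */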
#endif // CONFIG_TRIM_FILTER

#if CONFIG_ATRIM_FILTER
static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext *s = ctx->priv;
    int64_t start_sample, end_sample;
    int64_t pts;
    int drop;

    /* drop everything if EOF has already been returned */
    if (s->eof) {
        av_frame_free(&frame);
        return 0;
    }

    if (frame->pts != AV_NOPTS_VALUE)
        pts = av_rescale_q(frame->pts, inlink->time_base,
                           (AVRational){ 1, inlink->sample_rate });
    else
        pts = s->next_pts;
    s->next_pts = pts + frame->nb_samples;

    /* check if at least a part of the frame is after the start time */
    if (s->start_sample < 0 && s->start_pts == AV_NOPTS_VALUE) {
        start_sample = 0;
    } else {
        drop = 1;
        start_sample = frame->nb_samples;

        if (s->start_sample >= 0 &&
            s->nb_samples + frame->nb_samples > s->start_sample) {
            drop = 0;
            start_sample = FFMIN(start_sample, s->start_sample - s->nb_samples);
        }

        if (s->start_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
            pts + frame->nb_samples > s->start_pts) {
            drop = 0;
            start_sample = FFMIN(start_sample, s->start_pts - pts);
        }

        if (drop)
            goto drop;
    }

    if (s->first_pts == AV_NOPTS_VALUE)
        s->first_pts = pts + start_sample;

    /* check if at least a part of the frame is before the end time */
    if (s->end_sample == INT64_MAX && s->end_pts == AV_NOPTS_VALUE && !s->duration_tb) {
        end_sample = frame->nb_samples;
    } else {
        drop = 1;
        end_sample = 0;

        if (s->end_sample != INT64_MAX &&
            s->nb_samples < s->end_sample) {
            drop = 0;
            end_sample = FFMAX(end_sample, s->end_sample - s->nb_samples);
        }

        if (s->end_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
            pts < s->end_pts) {
            drop = 0;
            end_sample = FFMAX(end_sample, s->end_pts - pts);
        }

        if (s->duration_tb && pts - s->first_pts < s->duration_tb) {
            drop = 0;
            end_sample = FFMAX(end_sample, s->first_pts + s->duration_tb - pts);
        }

        if (drop) {
            s->eof = 1;
            ff_avfilter_link_set_out_status(inlink, AVERROR_EOF, AV_NOPTS_VALUE);
            goto drop;
        }
    }

    s->nb_samples += frame->nb_samples;
    start_sample   = FFMAX(0, start_sample);
    end_sample     = FFMIN(frame->nb_samples, end_sample);
    if (start_sample >= end_sample || !frame->nb_samples)
        goto drop;

    if (start_sample) {
        AVFrame *out = ff_get_audio_buffer(ctx->outputs[0], end_sample - start_sample);
        if (!out) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, frame);
        av_samples_copy(out->extended_data, frame->extended_data, 0, start_sample,
                        out->nb_samples, inlink->channels,
                        frame->format);
        if (out->pts != AV_NOPTS_VALUE)
            out->pts += av_rescale_q(start_sample, (AVRational){ 1, out->sample_rate },
                                     inlink->time_base);

        av_frame_free(&frame);
        frame = out;
    } else
        frame->nb_samples = end_sample;

    return ff_filter_frame(ctx->outputs[0], frame);

drop:
    s->nb_samples += frame->nb_samples;
    av_frame_free(&frame);
    return 0;
}
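
/*
 * Editor's note (worked example, not part of the original source): with
 * atrim=start_sample=1000 and 1024-sample input frames, the first frame is
 * kept only partially: within that frame start_sample becomes 1000 and
 * end_sample 1024, so a 24-sample buffer is allocated with
 * ff_get_audio_buffer(), the tail of the input is copied over with
 * av_samples_copy(), and its pts is advanced by 1000 samples rescaled to
 * the link time base. Subsequent frames then pass through unmodified.
 */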

#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption atrim_options[] = {
    COMMON_OPTS
    { "start_sample", "Number of the first audio sample that should be "
        "passed to the output", OFFSET(start_sample), AV_OPT_TYPE_INT64, { .i64 = -1 },        -1, INT64_MAX, FLAGS },
    { "end_sample",   "Number of the first audio sample that should be "
        "dropped again",        OFFSET(end_sample),   AV_OPT_TYPE_INT64, { .i64 = INT64_MAX },  0, INT64_MAX, FLAGS },
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(atrim);

static const AVFilterPad atrim_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = atrim_filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad atrim_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_atrim = {
    .name        = "atrim",
    .description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
    .init        = init,
    .priv_size   = sizeof(TrimContext),
    .priv_class  = &atrim_class,
    .inputs      = atrim_inputs,
    .outputs     = atrim_outputs,
};
#endif // CONFIG_ATRIM_FILTER