Line |
Branch |
Exec |
Source |
1 |
|
|
/* |
2 |
|
|
* Copyright (c) 2016 Paul B Mahol |
3 |
|
|
* |
4 |
|
|
* This file is part of FFmpeg. |
5 |
|
|
* |
6 |
|
|
* FFmpeg is free software; you can redistribute it and/or |
7 |
|
|
* modify it under the terms of the GNU Lesser General Public |
8 |
|
|
* License as published by the Free Software Foundation; either |
9 |
|
|
* version 2.1 of the License, or (at your option) any later version. |
10 |
|
|
* |
11 |
|
|
* FFmpeg is distributed in the hope that it will be useful, |
12 |
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 |
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 |
|
|
* Lesser General Public License for more details. |
15 |
|
|
* |
16 |
|
|
* You should have received a copy of the GNU Lesser General Public |
17 |
|
|
* License along with FFmpeg; if not, write to the Free Software |
18 |
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
19 |
|
|
*/ |
20 |
|
|
|
21 |
|
|
#include "config_components.h" |
22 |
|
|
|
23 |
|
|
#include "libavutil/audio_fifo.h" |
24 |
|
|
#include "libavutil/internal.h" |
25 |
|
|
#include "libavutil/mem.h" |
26 |
|
|
#include "libavutil/opt.h" |
27 |
|
|
#include "avfilter.h" |
28 |
|
|
#include "audio.h" |
29 |
|
|
#include "filters.h" |
30 |
|
|
#include "internal.h" |
31 |
|
|
#include "video.h" |
32 |
|
|
|
33 |
|
|
/* Shared private context for the audio (aloop) and video (loop) filters. */
typedef struct LoopContext {
    const AVClass *class;

    AVAudioFifo *fifo;       // audio only: samples recorded for the loop segment
    AVAudioFifo *left;       // audio only: leftover samples past the loop end, flushed after looping
    AVFrame **frames;        // video only: frames recorded for the loop segment
    int nb_frames;           // video only: number of frames currently stored in frames[]
    int current_frame;       // video only: index of the next stored frame to output
    int64_t time_pts;        // "time" option rescaled to the input time base (AV_NOPTS_VALUE if unset)
    int64_t duration;        // video only: total duration of the stored loop segment
    int64_t current_sample;  // audio only: offset of the next loop sample to output
    int64_t nb_samples;      // audio only: number of samples currently stored in fifo
    int64_t ignored_samples; // audio only: samples passed through before the loop start

    int loop;                // remaining loop count; -1 means loop forever
    int eof;                 // set once input EOF has been acknowledged
    int64_t size;            // "size" option: max frames/samples to record
    int64_t start;           // "start" option: loop start frame/sample; -1 selects "time"
    int64_t time;            // "time" option in AV_TIME_BASE units; INT64_MAX if unset
    int64_t pts;             // audio only: running output pts
    int64_t pts_offset;      // pts offset applied to looped and subsequent output
    int64_t eof_pts;         // pts reported together with the input EOF status
} LoopContext;
56 |
|
|
|
57 |
|
|
/* Option flags for the audio (aloop) and video (loop) filter variants. */
#define AFLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define VFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
/* Byte offset of an option's backing field inside LoopContext. */
#define OFFSET(x) offsetof(LoopContext, x)
60 |
|
|
|
61 |
|
✗ |
/* Warn the user when the "size" option was left at its default of 0,
 * in which case nothing will ever be recorded for looping. */
static void check_size(AVFilterContext *ctx)
{
    LoopContext *s = ctx->priv;
    const char *unit;

    if (s->size)
        return;

    /* pick the right wording for the video vs. audio variant */
    unit = ctx->input_pads[0].type == AVMEDIA_TYPE_VIDEO ? "frames" : "samples";
    av_log(ctx, AV_LOG_WARNING, "Number of %s to loop is not set!\n", unit);
}
69 |
|
|
|
70 |
|
✗ |
/* Rescale the "time" option into the link time base tb, keeping the
 * smallest value seen so far in s->time_pts. */
static void update_time(AVFilterContext *ctx, AVRational tb)
{
    LoopContext *s = ctx->priv;
    int64_t rescaled;

    if (s->time == INT64_MAX)
        return; /* "time" option not set */

    rescaled = av_rescale_q(s->time, AV_TIME_BASE_Q, tb);
    if (s->time_pts == AV_NOPTS_VALUE || rescaled < s->time_pts)
        s->time_pts = rescaled;
}
80 |
|
|
|
81 |
|
|
#if CONFIG_ALOOP_FILTER |
82 |
|
|
|
83 |
|
✗ |
/* Input configuration for aloop: allocate the two sample FIFOs
 * (loop storage and leftover storage) for the negotiated format. */
static int aconfig_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LoopContext *s = ctx->priv;
    const int nb_channels = inlink->ch_layout.nb_channels;

    s->time_pts = AV_NOPTS_VALUE;

    /* initial capacity only; AVAudioFifo grows on demand */
    s->fifo = av_audio_fifo_alloc(inlink->format, nb_channels, 8192);
    s->left = av_audio_fifo_alloc(inlink->format, nb_channels, 8192);
    if (!s->fifo || !s->left)
        return AVERROR(ENOMEM);

    check_size(ctx);

    return 0;
}
99 |
|
|
|
100 |
|
✗ |
/* Release the sample FIFOs; av_audio_fifo_free() accepts NULL, so this
 * is safe even if aconfig_input() failed part way. */
static av_cold void auninit(AVFilterContext *ctx)
{
    LoopContext *s = ctx->priv;

    av_audio_fifo_free(s->left);
    av_audio_fifo_free(s->fifo);
}
107 |
|
|
|
108 |
|
✗ |
/* Emit up to nb_samples samples from the recorded loop segment, wrapping
 * around at the segment end and decrementing the remaining loop count on
 * each full pass. Returns 0 or the last ff_filter_frame() result, or a
 * negative AVERROR code. */
static int push_samples(AVFilterContext *ctx, int nb_samples)
{
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    AVFrame *out;
    int ret = 0, i = 0;

    while (s->loop != 0 && i < nb_samples) {
        /* never read past the end of the stored segment in one frame */
        out = ff_get_audio_buffer(outlink, FFMIN(nb_samples, s->nb_samples - s->current_sample));
        if (!out)
            return AVERROR(ENOMEM);
        /* peek (not read) so the FIFO keeps the data for later passes */
        ret = av_audio_fifo_peek_at(s->fifo, (void **)out->extended_data, out->nb_samples, s->current_sample);
        if (ret < 0) {
            av_frame_free(&out);
            return ret;
        }
        out->pts = s->pts;
        out->nb_samples = ret;  /* ret = number of samples actually peeked */
        s->pts += av_rescale_q(out->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
        i += out->nb_samples;
        s->current_sample += out->nb_samples;

        ret = ff_filter_frame(outlink, out);
        if (ret < 0)
            return ret;

        if (s->current_sample >= s->nb_samples) {
            /* wrapped around: one full loop iteration completed */
            s->current_sample = 0;

            if (s->loop > 0)
                s->loop--;  /* negative loop means loop forever */
        }
    }

    return ret;
}
144 |
|
|
|
145 |
|
✗ |
/* Input frame handler for aloop: record samples into the loop FIFO once
 * the configured start point (by sample index or by time) is reached,
 * while passing everything downstream. */
static int afilter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    int ret = 0;

    /* Recording is active once the start sample or start time is reached,
     * provided looping was actually requested (size > 0, loop != 0). */
    if (((s->start >= 0 && s->ignored_samples + frame->nb_samples > s->start) ||
         (s->time_pts != AV_NOPTS_VALUE &&
          frame->pts >= s->time_pts)) &&
        s->size > 0 && s->loop != 0) {
        if (s->nb_samples < s->size) {
            /* still filling: store at most the remaining capacity */
            int written = FFMIN(frame->nb_samples, s->size - s->nb_samples);
            int drain = 0;

            if (s->start < 0)
                /* start was given as a time: derive the sample index now */
                s->start = inlink->sample_count_out - written;

            ret = av_audio_fifo_write(s->fifo, (void **)frame->extended_data, written);
            if (ret < 0)
                return ret;
            if (!s->nb_samples) {
                /* first write: drop the part of this frame that precedes
                 * the start sample and advance the output pts past it */
                drain = FFMAX(0, s->start - s->ignored_samples);
                s->pts = frame->pts;
                av_audio_fifo_drain(s->fifo, drain);
                s->pts += av_rescale_q(s->start - s->ignored_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
            }
            s->nb_samples += ret - drain;
            /* samples of this frame that did not fit into the loop buffer */
            drain = frame->nb_samples - written;
            if (s->nb_samples == s->size && drain > 0) {
                int ret2;

                /* stash the tail in the "left" FIFO; it is flushed after
                 * looping finishes (see arequest_frame) */
                ret2 = av_audio_fifo_write(s->left, (void **)frame->extended_data, frame->nb_samples);
                if (ret2 < 0)
                    return ret2;
                av_audio_fifo_drain(s->left, drain);
            }
            frame->nb_samples = ret;
            s->pts += av_rescale_q(ret, (AVRational){1, outlink->sample_rate}, outlink->time_base);
            ret = ff_filter_frame(outlink, frame);
        } else {
            /* loop buffer full: replace input with looped output */
            int nb_samples = frame->nb_samples;

            av_frame_free(&frame);
            ret = push_samples(ctx, nb_samples);
        }
    } else {
        /* before the loop start (or looping disabled): pass through,
         * re-stamping pts from the running counter */
        s->ignored_samples += frame->nb_samples;
        frame->pts = s->pts;
        s->pts += av_rescale_q(frame->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
        ret = ff_filter_frame(outlink, frame);
    }

    return ret;
}
200 |
|
|
|
201 |
|
✗ |
/* Produce output for aloop when downstream wants data: either request
 * more input, flush leftover samples once looping is done, or emit the
 * next chunk of looped samples. */
static int arequest_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LoopContext *s = ctx->priv;
    int ret = 0;

    if ((!s->size) ||
        (s->nb_samples < s->size) ||
        (s->nb_samples >= s->size && s->loop == 0)) {
        int nb_samples = av_audio_fifo_size(s->left);

        if (s->loop == 0 && nb_samples > 0) {
            /* looping finished: flush the samples that arrived in the
             * same frame that completed the loop buffer */
            AVFrame *out;

            out = ff_get_audio_buffer(outlink, nb_samples);
            if (!out)
                return AVERROR(ENOMEM);
            av_audio_fifo_read(s->left, (void **)out->extended_data, nb_samples);
            out->pts = s->pts;
            s->pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
            ret = ff_filter_frame(outlink, out);
            if (ret < 0)
                return ret;
        }
        ret = ff_request_frame(ctx->inputs[0]);
    } else {
        /* loop buffer complete: emit looped samples in 1024-sample chunks */
        ret = push_samples(ctx, 1024);
    }

    /* after input EOF keep draining the loop until the count runs out */
    if (s->eof && s->nb_samples > 0 && s->loop != 0) {
        ret = push_samples(ctx, 1024);
    }

    return ret;
}
236 |
|
|
|
237 |
|
✗ |
/* activate() callback for aloop: drives the filter under the modern
 * push/pull scheduling API. */
static int aactivate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    AVFrame *frame = NULL;
    int ret, status;

    /* propagate downstream EOF/errors back to the input */
    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    update_time(ctx, inlink->time_base);

    /* consume input while the loop buffer is not yet full (or when no
     * looping was requested at all) */
    if (!s->eof && (s->nb_samples < s->size || !s->loop || !s->size)) {
        /* cap reads at the remaining buffer capacity, max 1024 samples */
        const int in_nb_samples = FFMIN(1024, s->size - s->nb_samples);
        if (in_nb_samples == 0)
            ret = ff_inlink_consume_frame(inlink, &frame);
        else
            ret = ff_inlink_consume_samples(inlink, in_nb_samples, in_nb_samples, &frame);
        if (ret < 0)
            return ret;
        if (ret > 0)
            return afilter_frame(inlink, frame);
    }

    /* on input EOF, freeze the loop at whatever was collected so far */
    if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &s->eof_pts)) {
        if (status == AVERROR_EOF) {
            s->size = s->nb_samples;
            s->eof = 1;
        }
    }

    /* nothing (left) to loop: forward EOF downstream */
    if (s->eof && (!s->loop || !s->size)) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->eof_pts + s->pts_offset);
        return 0;
    }

    /* still filling or in passthrough: request more input if wanted */
    if (!s->eof && (!s->size ||
                    (s->nb_samples < s->size) ||
                    (s->nb_samples >= s->size && s->loop == 0))) {
        FF_FILTER_FORWARD_WANTED(outlink, inlink);
    } else if (s->loop && s->nb_samples == s->size) {
        /* buffer complete and loops remain: emit looped output */
        return arequest_frame(outlink);
    }

    return FFERROR_NOT_READY;
}
283 |
|
|
|
284 |
|
|
/* Options for the audio variant; loop = -1 loops forever, size = 0
 * disables looping (see check_size). */
static const AVOption aloop_options[] = {
    { "loop", "number of loops", OFFSET(loop), AV_OPT_TYPE_INT, {.i64 = 0 }, -1, INT_MAX, AFLAGS },
    { "size", "max number of samples to loop", OFFSET(size), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT32_MAX, AFLAGS },
    { "start", "set the loop start sample", OFFSET(start), AV_OPT_TYPE_INT64, {.i64 = 0 }, -1, INT64_MAX, AFLAGS },
    { "time", "set the loop start time", OFFSET(time), AV_OPT_TYPE_DURATION, {.i64=INT64_MAX}, INT64_MIN, INT64_MAX, AFLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(aloop);

static const AVFilterPad ainputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = aconfig_input,
    },
};

const AVFilter ff_af_aloop = {
    .name          = "aloop",
    .description   = NULL_IF_CONFIG_SMALL("Loop audio samples."),
    .priv_size     = sizeof(LoopContext),
    .priv_class    = &aloop_class,
    .activate      = aactivate,
    .uninit        = auninit,
    FILTER_INPUTS(ainputs),
    FILTER_OUTPUTS(ff_audio_default_filterpad),
};
312 |
|
|
#endif /* CONFIG_ALOOP_FILTER */ |
313 |
|
|
|
314 |
|
|
#if CONFIG_LOOP_FILTER |
315 |
|
|
|
316 |
|
✗ |
/* init() for the video loop filter: allocate the frame-pointer array
 * sized by the "size" option. */
static av_cold int init(AVFilterContext *ctx)
{
    LoopContext *s = ctx->priv;
    AVFrame **frames = av_calloc(s->size, sizeof(*frames));

    s->time_pts = AV_NOPTS_VALUE;

    if (!frames)
        return AVERROR(ENOMEM);
    s->frames = frames;

    check_size(ctx);
    return 0;
}
330 |
|
|
|
331 |
|
✗ |
/* Free every stored loop frame; the frames[] array itself stays
 * allocated (it is released in uninit()). */
static void free_frames(AVFilterContext *ctx)
{
    LoopContext *s = ctx->priv;
    int i = 0;

    while (i < s->nb_frames)
        av_frame_free(&s->frames[i++]);
}
338 |
|
|
|
339 |
|
✗ |
/* uninit() for the video loop filter: drop all stored frames and the
 * pointer array that held them. */
static av_cold void uninit(AVFilterContext *ctx)
{
    LoopContext *s = ctx->priv;

    free_frames(ctx);
    s->nb_frames = 0;
    av_freep(&s->frames);
}
347 |
|
|
|
348 |
|
✗ |
/* Clone the next stored loop frame and send it downstream, handling
 * wraparound, pts offsetting and loop-count bookkeeping. */
static int push_frame(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    AVFrame *out;
    int ret;

    out = av_frame_clone(s->frames[s->current_frame]);
    if (!out)
        return AVERROR(ENOMEM);
    /* shift pts so each loop pass follows the previous one in time */
    out->pts += s->pts_offset;
    ret = ff_filter_frame(outlink, out);
    s->current_frame++;

    if (s->current_frame >= s->nb_frames) {
        /* finished one full pass over the stored frames */
        s->current_frame = 0;

        s->pts_offset += s->duration;
        if (s->loop > 0)
            s->loop--;          /* negative loop means loop forever */
        if (s->loop == 0)
            free_frames(ctx);   /* looping done, stored frames no longer needed */
    }

    return ret;
}
374 |
|
|
|
375 |
|
✗ |
/* Input frame handler for the video loop filter: record frames into the
 * loop buffer once the configured start point (by frame index or time)
 * is reached, while passing everything downstream. */
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    int64_t duration;
    int ret = 0;

    /* Recording is active once the start frame or start time is reached,
     * provided looping was actually requested (size > 0, loop != 0). */
    if (((s->start >= 0 && inlink->frame_count_out >= s->start) ||
         (s->time_pts != AV_NOPTS_VALUE &&
          frame->pts >= s->time_pts)) &&
        s->size > 0 && s->loop != 0) {
        if (s->nb_frames < s->size) {
            /* still filling: keep a reference to this frame */
            s->frames[s->nb_frames] = av_frame_clone(frame);
            if (!s->frames[s->nb_frames]) {
                av_frame_free(&frame);
                return AVERROR(ENOMEM);
            }
            s->nb_frames++;
            if (frame->duration)
                duration = frame->duration;
            else
                /* no per-frame duration: derive one from the frame rate */
                duration = av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base);
            s->duration += duration;
            s->pts_offset = s->duration;
            ret = ff_filter_frame(outlink, frame);
        } else {
            /* buffer full: drop the input frame, emit looped output */
            av_frame_free(&frame);
            ret = push_frame(ctx);
        }
    } else {
        /* outside the loop window: pass through with pts shifted by the
         * time added by previous loop passes */
        frame->pts += s->pts_offset - s->duration;
        ret = ff_filter_frame(outlink, frame);
    }

    return ret;
}
412 |
|
|
|
413 |
|
✗ |
/* activate() callback for the video loop filter: drives the filter
 * under the modern push/pull scheduling API. */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    AVFrame *frame = NULL;
    int ret, status;

    /* propagate downstream EOF/errors upstream and drop stored frames */
    ret = ff_outlink_get_status(outlink);
    if (ret) {
        ff_inlink_set_status(inlink, ret);
        free_frames(ctx);
        return 0;
    }

    update_time(ctx, inlink->time_base);

    /* consume input while the loop buffer is not yet full (or when no
     * looping was requested at all) */
    if (!s->eof && (s->nb_frames < s->size || !s->loop || !s->size)) {
        ret = ff_inlink_consume_frame(inlink, &frame);
        if (ret < 0)
            return ret;
        if (ret > 0)
            return filter_frame(inlink, frame);
    }

    /* on input EOF, freeze the loop at whatever was collected so far */
    if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &s->eof_pts)) {
        if (status == AVERROR_EOF) {
            s->size = s->nb_frames;
            s->eof = 1;
        }
    }

    /* nothing (left) to loop: forward EOF downstream */
    if (s->eof && (!s->loop || !s->size)) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->eof_pts + s->pts_offset);
        free_frames(ctx);
        return 0;
    }

    /* still filling or in passthrough: request more input if wanted */
    if (!s->eof && (!s->size ||
                    (s->nb_frames < s->size) ||
                    (s->nb_frames >= s->size && s->loop == 0))) {
        FF_FILTER_FORWARD_WANTED(outlink, inlink);
    } else if (s->loop && s->nb_frames == s->size) {
        /* buffer complete and loops remain: emit the next looped frame */
        return push_frame(ctx);
    }

    return FFERROR_NOT_READY;
}
461 |
|
|
|
462 |
|
|
/* Options for the video variant; loop = -1 loops forever, size = 0
 * disables looping (see check_size). */
static const AVOption loop_options[] = {
    { "loop", "number of loops", OFFSET(loop), AV_OPT_TYPE_INT, {.i64 = 0 }, -1, INT_MAX, VFLAGS },
    { "size", "max number of frames to loop", OFFSET(size), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT16_MAX, VFLAGS },
    { "start", "set the loop start frame", OFFSET(start), AV_OPT_TYPE_INT64, {.i64 = 0 }, -1, INT64_MAX, VFLAGS },
    { "time", "set the loop start time", OFFSET(time), AV_OPT_TYPE_DURATION, {.i64=INT64_MAX}, INT64_MIN, INT64_MAX, VFLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(loop);

const AVFilter ff_vf_loop = {
    .name        = "loop",
    .description = NULL_IF_CONFIG_SMALL("Loop video frames."),
    .priv_size   = sizeof(LoopContext),
    .priv_class  = &loop_class,
    .init        = init,
    .uninit      = uninit,
    .activate    = activate,
    FILTER_INPUTS(ff_video_default_filterpad),
    FILTER_OUTPUTS(ff_video_default_filterpad),
};
483 |
|
|
#endif /* CONFIG_LOOP_FILTER */ |
484 |
|
|
|