/*
 * Copyright (c) 2016 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config_components.h"

#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "filters.h"
#include "video.h"

typedef struct LoopContext {
    const AVClass *class;

    AVAudioFifo *fifo;
    AVAudioFifo *left;
    AVFrame **frames;
    int nb_frames;
    int current_frame;
    int64_t time_pts;
    int64_t duration;
    int64_t current_sample;
    int64_t nb_samples;
    int64_t ignored_samples;

    int loop;
    int eof;
    int64_t size;
    int64_t start;
    int64_t time;
    int64_t pts;
    int64_t pts_offset;
    int64_t eof_pts;
} LoopContext;

#define AFLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define VFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define OFFSET(x) offsetof(LoopContext, x)
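
/* Warn if the "size" option was left at its default of 0, since the filter
 * then has nothing to buffer and loop. */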
static void check_size(AVFilterContext *ctx)
{
    LoopContext *s = ctx->priv;

    if (!s->size)
        av_log(ctx, AV_LOG_WARNING, "Number of %s to loop is not set!\n",
               ctx->input_pads[0].type == AVMEDIA_TYPE_VIDEO ? "frames" : "samples");
}
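
/* Convert the "time" option (in AV_TIME_BASE units) to the link time base
 * once that base is known; keep the smaller value if called repeatedly. */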
static void update_time(AVFilterContext *ctx, AVRational tb)
{
    LoopContext *s = ctx->priv;

    if (s->time != INT64_MAX) {
        int64_t time_pts = av_rescale_q(s->time, AV_TIME_BASE_Q, tb);
        if (s->time_pts == AV_NOPTS_VALUE || time_pts < s->time_pts)
            s->time_pts = time_pts;
    }
}

#if CONFIG_ALOOP_FILTER
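
/* aloop: allocate the FIFO that stores the looped segment plus a second
 * FIFO ("left") for samples of the frame that overshoot the segment end. */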
static int aconfig_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LoopContext *s = ctx->priv;

    s->time_pts = AV_NOPTS_VALUE;

    s->fifo = av_audio_fifo_alloc(inlink->format, inlink->ch_layout.nb_channels, 8192);
    s->left = av_audio_fifo_alloc(inlink->format, inlink->ch_layout.nb_channels, 8192);
    if (!s->fifo || !s->left)
        return AVERROR(ENOMEM);

    check_size(ctx);

    return 0;
}

static av_cold void auninit(AVFilterContext *ctx)
{
    LoopContext *s = ctx->priv;

    av_audio_fifo_free(s->fifo);
    av_audio_fifo_free(s->left);
}
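
/* Emit up to nb_samples looped samples from the FIFO into *frame. Each
 * call produces at most one output frame; the loop counter is decremented
 * every time the stored segment wraps around. */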
static int push_samples(AVFilterContext *ctx, int nb_samples, AVFrame **frame)
{
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    AVFrame *out;
    int ret = 0, i = 0;

    while (s->loop != 0 && i < nb_samples) {
        out = ff_get_audio_buffer(outlink, FFMIN(nb_samples, s->nb_samples - s->current_sample));
        if (!out)
            return AVERROR(ENOMEM);
        ret = av_audio_fifo_peek_at(s->fifo, (void **)out->extended_data, out->nb_samples, s->current_sample);
        if (ret < 0) {
            av_frame_free(&out);
            return ret;
        }
        out->pts = s->pts;
        out->nb_samples = ret;
        s->pts += av_rescale_q(out->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
        i += out->nb_samples;
        s->current_sample += out->nb_samples;

        *frame = out;

        if (s->current_sample >= s->nb_samples) {
            s->current_sample = 0;

            if (s->loop > 0)
                s->loop--;
        }

        return 0;
    }

    return ret;
}
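
/* Audio input path: frames before the configured start point are forwarded
 * with retimed PTS and counted in ignored_samples; once the start is
 * reached, up to "size" samples are copied into the loop FIFO while still
 * being passed through, and any overshoot is stashed in the "left" FIFO. */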
static int afilter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    FilterLink *inl = ff_filter_link(inlink);
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    int ret = 0;

    if (((s->start >= 0 && s->ignored_samples + frame->nb_samples > s->start) ||
         (s->time_pts != AV_NOPTS_VALUE &&
          frame->pts >= s->time_pts)) &&
        s->size > 0 && s->loop != 0) {
        if (s->nb_samples < s->size) {
            int written = FFMIN(frame->nb_samples, s->size - s->nb_samples);
            int drain = 0;

            if (s->start < 0)
                s->start = inl->sample_count_out - written;

            ret = av_audio_fifo_write(s->fifo, (void **)frame->extended_data, written);
            if (ret < 0)
                return ret;
            if (!s->nb_samples) {
                drain = FFMAX(0, s->start - s->ignored_samples);
                s->pts = frame->pts;
                av_audio_fifo_drain(s->fifo, drain);
                s->pts += av_rescale_q(s->start - s->ignored_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
            }
            s->nb_samples += ret - drain;
            if (s->nb_samples == s->size && frame->nb_samples > written) {
                int ret2;

                ret2 = av_audio_fifo_write(s->left, (void **)frame->extended_data, frame->nb_samples);
                if (ret2 < 0)
                    return ret2;
                av_audio_fifo_drain(s->left, written);
            }
            frame->nb_samples = ret;
            s->pts += av_rescale_q(ret, (AVRational){1, outlink->sample_rate}, outlink->time_base);
            ret = ff_filter_frame(outlink, frame);
        } else {
            av_assert0(0);
        }
    } else {
        s->ignored_samples += frame->nb_samples;
        frame->pts = s->pts;
        s->pts += av_rescale_q(frame->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
        ret = ff_filter_frame(outlink, frame);
    }

    return ret;
}
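
/* Produce output while looping: once the loop count is exhausted, drain
 * the "left" FIFO; otherwise pull the next chunk of looped samples. */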
static int arequest_frame(AVFilterLink *outlink, AVFrame **frame)
{
    AVFilterContext *ctx = outlink->src;
    LoopContext *s = ctx->priv;
    int ret = 0;

    if ((!s->size) ||
        (s->nb_samples < s->size) ||
        (s->nb_samples >= s->size && s->loop == 0)) {
        int nb_samples = av_audio_fifo_size(s->left);

        if (s->loop == 0 && nb_samples > 0) {
            AVFrame *out;

            out = ff_get_audio_buffer(outlink, nb_samples);
            if (!out)
                return AVERROR(ENOMEM);
            av_audio_fifo_read(s->left, (void **)out->extended_data, nb_samples);
            out->pts = s->pts;
            s->pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
            *frame = out;
        }
        return 0;
    } else {
        ret = push_samples(ctx, 1024, frame);
    }

    return ret;
}
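
/* Activate callback for aloop: try to emit looped samples first, then
 * consume input; on EOF clamp "size" to what was actually buffered so the
 * remaining loops can still be played out. */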
static int aactivate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    AVFrame *frame = NULL;
    int ret, status;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    update_time(ctx, inlink->time_base);

retry:
    ret = arequest_frame(outlink, &frame);
    if (ret < 0)
        return ret;
    if (frame)
        return ff_filter_frame(outlink, frame);

    ret = ff_inlink_consume_frame(inlink, &frame);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return afilter_frame(inlink, frame);

    ret = ff_inlink_acknowledge_status(inlink, &status, &s->eof_pts);
    if (ret) {
        if (status == AVERROR_EOF && !s->eof) {
            s->size = s->nb_samples;
            s->eof = 1;
            goto retry;
        }
        ff_outlink_set_status(outlink, status, s->eof_pts);
        return 0;
    }

    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}
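
/* Options for the aloop filter. Illustrative use (not part of this file):
 * "-af aloop=loop=-1:size=44100" would loop the first 44100 buffered
 * samples indefinitely. */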
static const AVOption aloop_options[] = {
    { "loop",  "number of loops",               OFFSET(loop),  AV_OPT_TYPE_INT,   {.i64 = 0 }, -1, INT_MAX,   AFLAGS },
    { "size",  "max number of samples to loop", OFFSET(size),  AV_OPT_TYPE_INT64, {.i64 = 0 },  0, INT32_MAX, AFLAGS },
    { "start", "set the loop start sample",     OFFSET(start), AV_OPT_TYPE_INT64, {.i64 = 0 }, -1, INT64_MAX, AFLAGS },
    { "time",  "set the loop start time",       OFFSET(time),  AV_OPT_TYPE_DURATION, {.i64=INT64_MAX}, INT64_MIN, INT64_MAX, AFLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(aloop);

static const AVFilterPad ainputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = aconfig_input,
    },
};

const AVFilter ff_af_aloop = {
    .name          = "aloop",
    .description   = NULL_IF_CONFIG_SMALL("Loop audio samples."),
    .priv_size     = sizeof(LoopContext),
    .priv_class    = &aloop_class,
    .activate      = aactivate,
    .uninit        = auninit,
    FILTER_INPUTS(ainputs),
    FILTER_OUTPUTS(ff_audio_default_filterpad),
};
#endif /* CONFIG_ALOOP_FILTER */

#if CONFIG_LOOP_FILTER
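
/* loop (video): allocate the frame pointer array sized to the "size"
 * option; input frames are cloned into it as they arrive. */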
static av_cold int init(AVFilterContext *ctx)
{
    LoopContext *s = ctx->priv;

    s->time_pts = AV_NOPTS_VALUE;

    s->frames = av_calloc(s->size, sizeof(*s->frames));
    if (!s->frames)
        return AVERROR(ENOMEM);

    check_size(ctx);

    return 0;
}

static void free_frames(AVFilterContext *ctx)
{
    LoopContext *s = ctx->priv;

    for (int i = 0; i < s->nb_frames; i++)
        av_frame_free(&s->frames[i]);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    LoopContext *s = ctx->priv;

    free_frames(ctx);
    av_freep(&s->frames);
    s->nb_frames = 0;
}
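
/* Send the next buffered frame downstream with the accumulated PTS offset;
 * after the last stored frame, advance the offset by one loop duration and
 * decrement the loop counter, freeing the buffer when it reaches zero. */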
static int push_frame(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    AVFrame *out;
    int ret;

    out = av_frame_clone(s->frames[s->current_frame]);
    if (!out)
        return AVERROR(ENOMEM);
    out->pts += s->pts_offset;
    ret = ff_filter_frame(outlink, out);
    s->current_frame++;

    if (s->current_frame >= s->nb_frames) {
        s->current_frame = 0;

        s->pts_offset += s->duration;
        if (s->loop > 0)
            s->loop--;
        if (s->loop == 0)
            free_frames(ctx);
    }

    return ret;
}
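
/* Video input path: clone frames into the buffer once the start frame or
 * time is reached; when the buffer is full, drop further input and start
 * replaying it. Frames outside the loop window are forwarded with their
 * PTS shifted by the accumulated offset. */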
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    FilterLink *inl = ff_filter_link(inlink);
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    FilterLink *outl = ff_filter_link(outlink);
    LoopContext *s = ctx->priv;
    int64_t duration;
    int ret = 0;

    if (((s->start >= 0 && inl->frame_count_out >= s->start) ||
         (s->time_pts != AV_NOPTS_VALUE &&
          frame->pts >= s->time_pts)) &&
        s->size > 0 && s->loop != 0) {
        if (s->nb_frames < s->size) {
            s->frames[s->nb_frames] = av_frame_clone(frame);
            if (!s->frames[s->nb_frames]) {
                av_frame_free(&frame);
                return AVERROR(ENOMEM);
            }
            s->nb_frames++;
            if (frame->duration)
                duration = frame->duration;
            else
                duration = av_rescale_q(1, av_inv_q(outl->frame_rate), outlink->time_base);
            s->duration += duration;
            s->pts_offset = s->duration;
            ret = ff_filter_frame(outlink, frame);
        } else {
            av_frame_free(&frame);
            ret = push_frame(ctx);
        }
    } else {
        frame->pts += s->pts_offset - s->duration;
        ret = ff_filter_frame(outlink, frame);
    }

    return ret;
}
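
/* Activate callback for loop: forward frames while buffering, latch EOF by
 * clamping "size" to the number of buffered frames, and keep replaying the
 * buffer until the loop counter reaches zero. */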
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    AVFrame *frame = NULL;
    int ret, status;

    ret = ff_outlink_get_status(outlink);
    if (ret) {
        ff_inlink_set_status(inlink, ret);
        free_frames(ctx);
        return 0;
    }

    update_time(ctx, inlink->time_base);

    if (!s->eof && (s->nb_frames < s->size || !s->loop || !s->size)) {
        ret = ff_inlink_consume_frame(inlink, &frame);
        if (ret < 0)
            return ret;
        if (ret > 0)
            return filter_frame(inlink, frame);
    }

    if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &s->eof_pts)) {
        if (status == AVERROR_EOF) {
            s->size = s->nb_frames;
            s->eof = 1;
        }
    }

    if (s->eof && (!s->loop || !s->size)) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->eof_pts + s->pts_offset);
        free_frames(ctx);
        return 0;
    }

    if (!s->eof && (!s->size ||
                    (s->nb_frames < s->size) ||
                    (s->nb_frames >= s->size && s->loop == 0))) {
        FF_FILTER_FORWARD_WANTED(outlink, inlink);
    } else if (s->loop && s->nb_frames == s->size) {
        return push_frame(ctx);
    }

    return FFERROR_NOT_READY;
}
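
/* Options for the loop filter. Illustrative use (not part of this file):
 * "-vf loop=loop=-1:size=1:start=0" would repeat the first frame
 * indefinitely. */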
static const AVOption loop_options[] = {
    { "loop",  "number of loops",              OFFSET(loop),  AV_OPT_TYPE_INT,   {.i64 = 0 }, -1, INT_MAX,   VFLAGS },
    { "size",  "max number of frames to loop", OFFSET(size),  AV_OPT_TYPE_INT64, {.i64 = 0 },  0, INT16_MAX, VFLAGS },
    { "start", "set the loop start frame",     OFFSET(start), AV_OPT_TYPE_INT64, {.i64 = 0 }, -1, INT64_MAX, VFLAGS },
    { "time",  "set the loop start time",      OFFSET(time),  AV_OPT_TYPE_DURATION, {.i64=INT64_MAX}, INT64_MIN, INT64_MAX, VFLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(loop);

const AVFilter ff_vf_loop = {
    .name          = "loop",
    .description   = NULL_IF_CONFIG_SMALL("Loop video frames."),
    .priv_size     = sizeof(LoopContext),
    .priv_class    = &loop_class,
    .init          = init,
    .uninit        = uninit,
    .activate      = activate,
    FILTER_INPUTS(ff_video_default_filterpad),
    FILTER_OUTPUTS(ff_video_default_filterpad),
};
#endif /* CONFIG_LOOP_FILTER */