/*
 * Copyright (c) 2015 Derek Buitenhuis
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

#define DEFAULT_LENGTH 300

typedef struct ReverseContext {
    int nb_frames;
    AVFrame **frames;
    unsigned int frames_size;
    unsigned int pts_size;
    int64_t *pts;
    int flush_idx;
} ReverseContext;
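
/* Allocate the frame and pts buffers with room for DEFAULT_LENGTH entries;
 * they are grown on demand in filter_frame(). */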
static av_cold int init(AVFilterContext *ctx)
{
    ReverseContext *s = ctx->priv;

    s->pts = av_fast_realloc(NULL, &s->pts_size,
                             DEFAULT_LENGTH * sizeof(*(s->pts)));
    if (!s->pts)
        return AVERROR(ENOMEM);

    s->frames = av_fast_realloc(NULL, &s->frames_size,
                                DEFAULT_LENGTH * sizeof(*(s->frames)));
    if (!s->frames) {
        av_freep(&s->pts);
        return AVERROR(ENOMEM);
    }

    return 0;
}
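
/* Free any frames still buffered (e.g. when the filter is torn down before
 * the reversed output has been fully flushed), then the bookkeeping arrays. */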
static av_cold void uninit(AVFilterContext *ctx)
{
    ReverseContext *s = ctx->priv;

    while (s->nb_frames > 0) {
        av_frame_free(&s->frames[s->nb_frames - 1]);
        s->nb_frames--;
    }

    av_freep(&s->pts);
    av_freep(&s->frames);
}
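
/* Shared by reverse and areverse: buffer every incoming frame and its pts,
 * doubling the frame and pts arrays with av_fast_realloc() whenever they
 * fill up. The whole input is therefore held in memory until EOF. */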
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    ReverseContext *s = ctx->priv;
    void *ptr;

    if (s->nb_frames + 1 > s->pts_size / sizeof(*(s->pts))) {
        ptr = av_fast_realloc(s->pts, &s->pts_size, s->pts_size * 2);
        if (!ptr)
            return AVERROR(ENOMEM);
        s->pts = ptr;
    }

    if (s->nb_frames + 1 > s->frames_size / sizeof(*(s->frames))) {
        ptr = av_fast_realloc(s->frames, &s->frames_size, s->frames_size * 2);
        if (!ptr)
            return AVERROR(ENOMEM);
        s->frames = ptr;
    }

    s->frames[s->nb_frames] = in;
    s->pts[s->nb_frames] = in->pts;
    s->nb_frames++;

    return 0;
}

#if CONFIG_REVERSE_FILTER
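
/* Keep pulling frames from the input until it signals EOF, then hand the
 * buffered frames back downstream in reverse order. The stored pts values
 * are reassigned in their original, increasing order (via flush_idx) so the
 * reversed frames still carry monotonic timestamps. */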
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ReverseContext *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && s->nb_frames > 0) {
        AVFrame *out = s->frames[s->nb_frames - 1];
        out->pts = s->pts[s->flush_idx++];
        ret = ff_filter_frame(outlink, out);
        s->frames[s->nb_frames - 1] = NULL;
        s->nb_frames--;
    }

    return ret;
}

static const AVFilterPad reverse_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad reverse_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_vf_reverse = {
    .name        = "reverse",
    .description = NULL_IF_CONFIG_SMALL("Reverse a clip."),
    .priv_size   = sizeof(ReverseContext),
    .init        = init,
    .uninit      = uninit,
    .inputs      = reverse_inputs,
    .outputs     = reverse_outputs,
};

#endif /* CONFIG_REVERSE_FILTER */

#if CONFIG_AREVERSE_FILTER
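
/* areverse places no constraints on the input: accept any channel count,
 * any audio sample format and any sample rate. */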
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    ret = ff_set_common_formats(ctx, ff_all_formats(AVMEDIA_TYPE_AUDIO));
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
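
/* Reverse the sample order of a planar frame in place: for each channel
 * plane, swap samples from the two ends towards the middle, with the swap
 * width chosen from the sample format. */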
static void reverse_samples_planar(AVFrame *out)
{
    for (int p = 0; p < out->channels; p++) {
        switch (out->format) {
        case AV_SAMPLE_FMT_U8P: {
            uint8_t *dst = (uint8_t *)out->extended_data[p];
            for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                FFSWAP(uint8_t, dst[i], dst[j]);
        }
        break;
        case AV_SAMPLE_FMT_S16P: {
            int16_t *dst = (int16_t *)out->extended_data[p];
            for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                FFSWAP(int16_t, dst[i], dst[j]);
        }
        break;
        case AV_SAMPLE_FMT_S32P: {
            int32_t *dst = (int32_t *)out->extended_data[p];
            for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                FFSWAP(int32_t, dst[i], dst[j]);
        }
        break;
        case AV_SAMPLE_FMT_FLTP: {
            float *dst = (float *)out->extended_data[p];
            for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                FFSWAP(float, dst[i], dst[j]);
        }
        break;
        case AV_SAMPLE_FMT_DBLP: {
            double *dst = (double *)out->extended_data[p];
            for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                FFSWAP(double, dst[i], dst[j]);
        }
        break;
        }
    }
}
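
/* Same in-place reversal for packed (interleaved) frames: positions i and j
 * swap one sample per channel on every iteration. */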
static void reverse_samples_packed(AVFrame *out)
{
    const int channels = out->channels;

    switch (out->format) {
    case AV_SAMPLE_FMT_U8: {
        uint8_t *dst = (uint8_t *)out->extended_data[0];
        for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
            for (int p = 0; p < channels; p++)
                FFSWAP(uint8_t, dst[i * channels + p], dst[j * channels + p]);
    }
    break;
    case AV_SAMPLE_FMT_S16: {
        int16_t *dst = (int16_t *)out->extended_data[0];
        for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
            for (int p = 0; p < channels; p++)
                FFSWAP(int16_t, dst[i * channels + p], dst[j * channels + p]);
    }
    break;
    case AV_SAMPLE_FMT_S32: {
        int32_t *dst = (int32_t *)out->extended_data[0];
        for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
            for (int p = 0; p < channels; p++)
                FFSWAP(int32_t, dst[i * channels + p], dst[j * channels + p]);
    }
    break;
    case AV_SAMPLE_FMT_FLT: {
        float *dst = (float *)out->extended_data[0];
        for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
            for (int p = 0; p < channels; p++)
                FFSWAP(float, dst[i * channels + p], dst[j * channels + p]);
    }
    break;
    case AV_SAMPLE_FMT_DBL: {
        double *dst = (double *)out->extended_data[0];
        for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
            for (int p = 0; p < channels; p++)
                FFSWAP(double, dst[i * channels + p], dst[j * channels + p]);
    }
    break;
    }
}
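
/* Flush logic identical to the video request_frame(), except that each
 * buffered frame has its samples reversed in place before being sent
 * downstream. The input pad sets .needs_writable so the frames can be
 * modified directly. */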
static int areverse_request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ReverseContext *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && s->nb_frames > 0) {
        AVFrame *out = s->frames[s->nb_frames - 1];
        out->pts = s->pts[s->flush_idx++];

        if (av_sample_fmt_is_planar(out->format))
            reverse_samples_planar(out);
        else
            reverse_samples_packed(out);
        ret = ff_filter_frame(outlink, out);
        s->frames[s->nb_frames - 1] = NULL;
        s->nb_frames--;
    }

    return ret;
}

static const AVFilterPad areverse_inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_AUDIO,
        .filter_frame   = filter_frame,
        .needs_writable = 1,
    },
    { NULL }
};

static const AVFilterPad areverse_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = areverse_request_frame,
    },
    { NULL }
};

AVFilter ff_af_areverse = {
    .name          = "areverse",
    .description   = NULL_IF_CONFIG_SMALL("Reverse an audio clip."),
    .query_formats = query_formats,
    .priv_size     = sizeof(ReverseContext),
    .init          = init,
    .uninit        = uninit,
    .inputs        = areverse_inputs,
    .outputs       = areverse_outputs,
};

#endif /* CONFIG_AREVERSE_FILTER */