/*
 * Copyright (c) 2011 Stefano Sabatini
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * filter for selecting which frame passes in the filterchain
 */
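
/*
 * Illustrative filtergraph usage (see the select/aselect filter
 * documentation for the full expression syntax):
 *
 *   select='eq(pict_type\,I)'    pass only I-frames
 *   select='gt(scene\,0.4)'      pass frames that likely start a new scene
 *   select='not(mod(n\,10))'     pass one frame out of every ten
 */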

#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/fifo.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "scene_sad.h"

static const char *const var_names[] = {
    "TB",                ///< timebase

    "pts",               ///< original pts in the file of the frame
    "start_pts",         ///< first PTS in the stream, expressed in TB units
    "prev_pts",          ///< previous frame PTS
    "prev_selected_pts", ///< previous selected frame PTS

    "t",                 ///< timestamp expressed in seconds
    "start_t",           ///< first PTS in the stream, expressed in seconds
    "prev_t",            ///< previous frame time
    "prev_selected_t",   ///< previously selected time

    "pict_type",         ///< the type of picture in the movie
    "I",
    "P",
    "B",
    "S",
    "SI",
    "SP",
    "BI",
    "PICT_TYPE_I",
    "PICT_TYPE_P",
    "PICT_TYPE_B",
    "PICT_TYPE_S",
    "PICT_TYPE_SI",
    "PICT_TYPE_SP",
    "PICT_TYPE_BI",

    "interlace_type",    ///< the frame interlace type
    "PROGRESSIVE",
    "TOPFIRST",
    "BOTTOMFIRST",

    "consumed_samples_n",///< number of samples consumed by the filter (only audio)
    "samples_n",         ///< number of samples in the current frame (only audio)
    "sample_rate",       ///< sample rate (only audio)

    "n",                 ///< frame number (starting from zero)
    "selected_n",        ///< selected frame number (starting from zero)
    "prev_selected_n",   ///< number of the last selected frame

    "key",               ///< tell if the frame is a key frame
    "pos",               ///< original position in the file of the frame

    "scene",

    "concatdec_select",  ///< frame is within the interval set by the concat demuxer

    NULL
};

enum var_name {
    VAR_TB,

    VAR_PTS,
    VAR_START_PTS,
    VAR_PREV_PTS,
    VAR_PREV_SELECTED_PTS,

    VAR_T,
    VAR_START_T,
    VAR_PREV_T,
    VAR_PREV_SELECTED_T,

    VAR_PICT_TYPE,
    VAR_I,
    VAR_P,
    VAR_B,
    VAR_S,
    VAR_SI,
    VAR_SP,
    VAR_BI,
    VAR_PICT_TYPE_I,
    VAR_PICT_TYPE_P,
    VAR_PICT_TYPE_B,
    VAR_PICT_TYPE_S,
    VAR_PICT_TYPE_SI,
    VAR_PICT_TYPE_SP,
    VAR_PICT_TYPE_BI,

    VAR_INTERLACE_TYPE,
    VAR_INTERLACE_TYPE_P,
    VAR_INTERLACE_TYPE_T,
    VAR_INTERLACE_TYPE_B,

    VAR_CONSUMED_SAMPLES_N,
    VAR_SAMPLES_N,
    VAR_SAMPLE_RATE,

    VAR_N,
    VAR_SELECTED_N,
    VAR_PREV_SELECTED_N,

    VAR_KEY,
    VAR_POS,

    VAR_SCENE,

    VAR_CONCATDEC_SELECT,

    VAR_VARS_NB
};

typedef struct SelectContext {
    const AVClass *class;
    char *expr_str;
    AVExpr *expr;
    double var_values[VAR_VARS_NB];
    int bitdepth;
    int nb_planes;
    ptrdiff_t width[4];
    ptrdiff_t height[4];
    int do_scene_detect;            ///< 1 if the expression requires scene detection variables, 0 otherwise
    ff_scene_sad_fn sad;            ///< Sum of the absolute difference function (scene detect only)
    double prev_mafd;               ///< previous MAFD (scene detect only)
    AVFrame *prev_picref;           ///< previous frame (scene detect only)
    double select;
    int select_out;                 ///< mark the selected output pad index
    int nb_outputs;
} SelectContext;

#define OFFSET(x) offsetof(SelectContext, x)
#define DEFINE_OPTIONS(filt_name, FLAGS)                            \
static const AVOption filt_name##_options[] = {                     \
    { "expr", "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
    { "e",    "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
    { "outputs", "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
    { "n",       "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
    { NULL }                                                         \
}

static int request_frame(AVFilterLink *outlink);

static av_cold int init(AVFilterContext *ctx)
{
    SelectContext *select = ctx->priv;
    int i, ret;

    if ((ret = av_expr_parse(&select->expr, select->expr_str,
                             var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n",
               select->expr_str);
        return ret;
    }
    select->do_scene_detect = !!strstr(select->expr_str, "scene");

    for (i = 0; i < select->nb_outputs; i++) {
        AVFilterPad pad = { 0 };

        pad.name = av_asprintf("output%d", i);
        if (!pad.name)
            return AVERROR(ENOMEM);
        pad.type = ctx->filter->inputs[0].type;
        pad.request_frame = request_frame;
        if ((ret = ff_insert_outpad(ctx, i, &pad)) < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }

    return 0;
}

#define INTERLACE_TYPE_P 0
#define INTERLACE_TYPE_T 1
#define INTERLACE_TYPE_B 2
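
/*
 * config_input() caches the per-plane sample width and line count used by
 * the scene-change SAD, and seeds the expression variables: the PICT_TYPE_*
 * and INTERLACE_TYPE_* constants, zero for the frame counters, and NAN for
 * values that are only known once frames start flowing.
 */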
static int config_input(AVFilterLink *inlink)
{
    SelectContext *select = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) &&
                 (desc->flags & AV_PIX_FMT_FLAG_PLANAR) &&
                 desc->nb_components >= 3;

    select->bitdepth = desc->comp[0].depth;
    select->nb_planes = is_yuv ? 1 : av_pix_fmt_count_planes(inlink->format);

    for (int plane = 0; plane < select->nb_planes; plane++) {
        ptrdiff_t line_size = av_image_get_linesize(inlink->format, inlink->w, plane);
        int vsub = desc->log2_chroma_h;

        select->width[plane] = line_size >> (select->bitdepth > 8);
        select->height[plane] = plane == 1 || plane == 2 ? AV_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;
    }

    select->var_values[VAR_N]          = 0.0;
    select->var_values[VAR_SELECTED_N] = 0.0;

    select->var_values[VAR_TB] = av_q2d(inlink->time_base);

    select->var_values[VAR_PREV_PTS]          = NAN;
    select->var_values[VAR_PREV_SELECTED_PTS] = NAN;
    select->var_values[VAR_PREV_SELECTED_T]   = NAN;
    select->var_values[VAR_PREV_T]            = NAN;
    select->var_values[VAR_START_PTS]         = NAN;
    select->var_values[VAR_START_T]           = NAN;

    select->var_values[VAR_I]  = AV_PICTURE_TYPE_I;
    select->var_values[VAR_P]  = AV_PICTURE_TYPE_P;
    select->var_values[VAR_B]  = AV_PICTURE_TYPE_B;
    select->var_values[VAR_SI] = AV_PICTURE_TYPE_SI;
    select->var_values[VAR_SP] = AV_PICTURE_TYPE_SP;
    select->var_values[VAR_BI] = AV_PICTURE_TYPE_BI;
    select->var_values[VAR_PICT_TYPE_I]  = AV_PICTURE_TYPE_I;
    select->var_values[VAR_PICT_TYPE_P]  = AV_PICTURE_TYPE_P;
    select->var_values[VAR_PICT_TYPE_B]  = AV_PICTURE_TYPE_B;
    select->var_values[VAR_PICT_TYPE_SI] = AV_PICTURE_TYPE_SI;
    select->var_values[VAR_PICT_TYPE_SP] = AV_PICTURE_TYPE_SP;
    select->var_values[VAR_PICT_TYPE_BI] = AV_PICTURE_TYPE_BI;

    select->var_values[VAR_INTERLACE_TYPE_P] = INTERLACE_TYPE_P;
    select->var_values[VAR_INTERLACE_TYPE_T] = INTERLACE_TYPE_T;
    select->var_values[VAR_INTERLACE_TYPE_B] = INTERLACE_TYPE_B;

    select->var_values[VAR_PICT_TYPE]          = NAN;
    select->var_values[VAR_INTERLACE_TYPE]     = NAN;
    select->var_values[VAR_SCENE]              = NAN;
    select->var_values[VAR_CONSUMED_SAMPLES_N] = NAN;
    select->var_values[VAR_SAMPLES_N]          = NAN;

    select->var_values[VAR_SAMPLE_RATE] =
        inlink->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;

    if (CONFIG_SELECT_FILTER && select->do_scene_detect) {
        select->sad = ff_scene_sad_get_fn(select->bitdepth == 8 ? 8 : 16);
        if (!select->sad)
            return AVERROR(EINVAL);
    }
    return 0;
}
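
/*
 * Scene-change score: the mean absolute frame difference (MAFD) between the
 * current and the previous frame is computed and normalized to an 8-bit
 * range, then min(MAFD, |MAFD - prev_MAFD|) / 100 is clipped into [0, 1].
 * Values near 1 indicate that the picture content changed almost entirely.
 */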
static double get_scene_score(AVFilterContext *ctx, AVFrame *frame)
{
    double ret = 0;
    SelectContext *select = ctx->priv;
    AVFrame *prev_picref = select->prev_picref;

    if (prev_picref &&
        frame->height == prev_picref->height &&
        frame->width  == prev_picref->width) {
        uint64_t sad = 0;
        double mafd, diff;
        uint64_t count = 0;

        for (int plane = 0; plane < select->nb_planes; plane++) {
            uint64_t plane_sad;
            select->sad(prev_picref->data[plane], prev_picref->linesize[plane],
                        frame->data[plane], frame->linesize[plane],
                        select->width[plane], select->height[plane], &plane_sad);
            sad += plane_sad;
            count += select->width[plane] * select->height[plane];
        }

        emms_c();
        mafd = (double)sad / count / (1ULL << (select->bitdepth - 8));
        diff = fabs(mafd - select->prev_mafd);
        ret  = av_clipf(FFMIN(mafd, diff) / 100., 0, 1);
        select->prev_mafd = mafd;
        av_frame_free(&prev_picref);
    }
    select->prev_picref = av_frame_clone(frame);
    return ret;
}
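
/*
 * Map the concat demuxer metadata to the concatdec_select variable: -1
 * (i.e. "select") while the frame pts lies inside [start_time, start_time +
 * duration) of the current segment (or past start_time when no duration is
 * set), 0 outside that interval, and NAN when the frame carries no
 * lavf.concatdec.* metadata at all.
 */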
static double get_concatdec_select(AVFrame *frame, int64_t pts)
{
    AVDictionary *metadata = frame->metadata;
    AVDictionaryEntry *start_time_entry = av_dict_get(metadata, "lavf.concatdec.start_time", NULL, 0);
    AVDictionaryEntry *duration_entry = av_dict_get(metadata, "lavf.concatdec.duration", NULL, 0);
    if (start_time_entry) {
        int64_t start_time = strtoll(start_time_entry->value, NULL, 10);
        if (pts >= start_time) {
            if (duration_entry) {
                int64_t duration = strtoll(duration_entry->value, NULL, 10);
                if (pts < start_time + duration)
                    return -1;
                else
                    return 0;
            }
            return -1;
        }
        return 0;
    }
    return NAN;
}
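
/*
 * Evaluate the select expression for one frame and store the routing
 * decision in select->select_out: -1 means drop, NAN or negative results go
 * to the first output, and positive results are mapped to output
 * ceil(res)-1, clamped to the last configured pad.
 */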
static void select_frame(AVFilterContext *ctx, AVFrame *frame)
{
    SelectContext *select = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    double res;

    if (isnan(select->var_values[VAR_START_PTS]))
        select->var_values[VAR_START_PTS] = TS2D(frame->pts);
    if (isnan(select->var_values[VAR_START_T]))
        select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);

    select->var_values[VAR_N  ] = inlink->frame_count_out;
    select->var_values[VAR_PTS] = TS2D(frame->pts);
    select->var_values[VAR_T  ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
    select->var_values[VAR_POS] = frame->pkt_pos == -1 ? NAN : frame->pkt_pos;
    select->var_values[VAR_KEY] = frame->key_frame;
    select->var_values[VAR_CONCATDEC_SELECT] = get_concatdec_select(frame, av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q));

    switch (inlink->type) {
    case AVMEDIA_TYPE_AUDIO:
        select->var_values[VAR_SAMPLES_N] = frame->nb_samples;
        break;

    case AVMEDIA_TYPE_VIDEO:
        select->var_values[VAR_INTERLACE_TYPE] =
            !frame->interlaced_frame ? INTERLACE_TYPE_P :
            frame->top_field_first   ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
        select->var_values[VAR_PICT_TYPE] = frame->pict_type;
        if (select->do_scene_detect) {
            char buf[32];
            select->var_values[VAR_SCENE] = get_scene_score(ctx, frame);
            // TODO: document metadata
            snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
            av_dict_set(&frame->metadata, "lavfi.scene_score", buf, 0);
        }
        break;
    }

    select->select = res = av_expr_eval(select->expr, select->var_values, NULL);
    av_log(inlink->dst, AV_LOG_DEBUG,
           "n:%f pts:%f t:%f key:%d",
           select->var_values[VAR_N],
           select->var_values[VAR_PTS],
           select->var_values[VAR_T],
           frame->key_frame);

    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        av_log(inlink->dst, AV_LOG_DEBUG, " interlace_type:%c pict_type:%c scene:%f",
               (!frame->interlaced_frame) ? 'P' :
               frame->top_field_first ? 'T' : 'B',
               av_get_picture_type_char(frame->pict_type),
               select->var_values[VAR_SCENE]);
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_log(inlink->dst, AV_LOG_DEBUG, " samples_n:%d consumed_samples_n:%f",
               frame->nb_samples,
               select->var_values[VAR_CONSUMED_SAMPLES_N]);
        break;
    }

    if (res == 0) {
        select->select_out = -1; /* drop */
    } else if (isnan(res) || res < 0) {
        select->select_out = 0; /* first output */
    } else {
        select->select_out = FFMIN(ceilf(res)-1, select->nb_outputs-1); /* other outputs */
    }

    av_log(inlink->dst, AV_LOG_DEBUG, " -> select:%f select_out:%d\n", res, select->select_out);

    if (res) {
        select->var_values[VAR_PREV_SELECTED_N]   = select->var_values[VAR_N];
        select->var_values[VAR_PREV_SELECTED_PTS] = select->var_values[VAR_PTS];
        select->var_values[VAR_PREV_SELECTED_T]   = select->var_values[VAR_T];
        select->var_values[VAR_SELECTED_N] += 1.0;
        if (inlink->type == AVMEDIA_TYPE_AUDIO)
            select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples;
    }

    select->var_values[VAR_PREV_PTS] = select->var_values[VAR_PTS];
    select->var_values[VAR_PREV_T]   = select->var_values[VAR_T];
}

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    SelectContext *select = ctx->priv;

    select_frame(ctx, frame);
    if (select->select)
        return ff_filter_frame(ctx->outputs[select->select_out], frame);

    av_frame_free(&frame);
    return 0;
}

static int request_frame(AVFilterLink *outlink)
{
    AVFilterLink *inlink = outlink->src->inputs[0];
    int ret = ff_request_frame(inlink);
    return ret;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    SelectContext *select = ctx->priv;
    int i;

    av_expr_free(select->expr);
    select->expr = NULL;

    for (i = 0; i < ctx->nb_outputs; i++)
        av_freep(&ctx->output_pads[i].name);

    if (select->do_scene_detect) {
        av_frame_free(&select->prev_picref);
    }
}

#if CONFIG_ASELECT_FILTER

DEFINE_OPTIONS(aselect, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
AVFILTER_DEFINE_CLASS(aselect);

static av_cold int aselect_init(AVFilterContext *ctx)
{
    SelectContext *select = ctx->priv;
    int ret;

    if ((ret = init(ctx)) < 0)
        return ret;

    if (select->do_scene_detect) {
        av_log(ctx, AV_LOG_ERROR, "Scene detection is ignored in aselect filter\n");
        return AVERROR(EINVAL);
    }

    return 0;
}

static const AVFilterPad avfilter_af_aselect_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

AVFilter ff_af_aselect = {
    .name        = "aselect",
    .description = NULL_IF_CONFIG_SMALL("Select audio frames to pass in output."),
    .init        = aselect_init,
    .uninit      = uninit,
    .priv_size   = sizeof(SelectContext),
    .inputs      = avfilter_af_aselect_inputs,
    .priv_class  = &aselect_class,
    .flags       = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
#endif /* CONFIG_ASELECT_FILTER */

#if CONFIG_SELECT_FILTER
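
/*
 * When scene detection is requested the input is restricted to the pixel
 * formats listed below (8-bit packed RGB/gray and planar YUV 4:2:0/4:2:2,
 * plus 10-bit YUV 4:2:0); otherwise any format negotiated by the rest of
 * the graph is accepted.
 */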
static int query_formats(AVFilterContext *ctx)
{
    SelectContext *select = ctx->priv;

    if (!select->do_scene_detect) {
        return ff_default_query_formats(ctx);
    } else {
        int ret;
        static const enum AVPixelFormat pix_fmts[] = {
            AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, AV_PIX_FMT_RGBA,
            AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA, AV_PIX_FMT_GRAY8,
            AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P,
            AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVJ422P,
            AV_PIX_FMT_YUV420P10,
            AV_PIX_FMT_NONE
        };
        AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);

        if (!fmts_list)
            return AVERROR(ENOMEM);
        ret = ff_set_common_formats(ctx, fmts_list);
        if (ret < 0)
            return ret;
    }
    return 0;
}

DEFINE_OPTIONS(select, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
AVFILTER_DEFINE_CLASS(select);

static av_cold int select_init(AVFilterContext *ctx)
{
    int ret;

    if ((ret = init(ctx)) < 0)
        return ret;

    return 0;
}

static const AVFilterPad avfilter_vf_select_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

AVFilter ff_vf_select = {
    .name          = "select",
    .description   = NULL_IF_CONFIG_SMALL("Select video frames to pass in output."),
    .init          = select_init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(SelectContext),
    .priv_class    = &select_class,
    .inputs        = avfilter_vf_select_inputs,
    .flags         = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
#endif /* CONFIG_SELECT_FILTER */