FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavfilter/avf_avectorscope.c
Date: 2024-11-20 23:03:26
             Exec   Total   Coverage
Lines:          0     231       0.0%
Functions:      0      10       0.0%
Branches:       0     156       0.0%

Line Branch Exec Source
1 /*
2 * Copyright (c) 2013 Paul B Mahol
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 /**
22 * @file
23 * audio to video multimedia vectorscope filter
24 */
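The filter consumes stereo audio and, for each block of input samples, emits one RGBA video frame visualizing the instantaneous relationship between the two channels (see query_formats() and filter_frame() below).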
25
26 #include "libavutil/avassert.h"
27 #include "libavutil/channel_layout.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/parseutils.h"
30 #include "avfilter.h"
31 #include "filters.h"
32 #include "formats.h"
33 #include "audio.h"
34 #include "video.h"
35
36 enum VectorScopeMode {
37 LISSAJOUS,
38 LISSAJOUS_XY,
39 POLAR,
40 MODE_NB,
41 };
42
43 enum VectorScopeDraw {
44 DOT,
45 LINE,
46 AALINE,
47 DRAW_NB,
48 };
49
50 enum VectorScopeScale {
51 LIN,
52 SQRT,
53 CBRT,
54 LOG,
55 SCALE_NB,
56 };
57
58 typedef struct AudioVectorScopeContext {
59 const AVClass *class;
60 AVFrame *outpicref;
61 int w, h;
62 int hw, hh;
63 int mode;
64 int draw;
65 int scale;
66 int contrast[4];
67 int fade[4];
68 double zoom;
69 int swap;
70 int mirror;
71 unsigned prev_x, prev_y;
72 AVRational frame_rate;
73 int nb_samples;
74 } AudioVectorScopeContext;
75
76 #define OFFSET(x) offsetof(AudioVectorScopeContext, x)
77 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
78 #define TFLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
79
80 static const AVOption avectorscope_options[] = {
81 { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=LISSAJOUS}, 0, MODE_NB-1, TFLAGS, .unit = "mode" },
82 { "m", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=LISSAJOUS}, 0, MODE_NB-1, TFLAGS, .unit = "mode" },
83 { "lissajous", "", 0, AV_OPT_TYPE_CONST, {.i64=LISSAJOUS}, 0, 0, TFLAGS, .unit = "mode" },
84 { "lissajous_xy", "", 0, AV_OPT_TYPE_CONST, {.i64=LISSAJOUS_XY}, 0, 0, TFLAGS, .unit = "mode" },
85 { "polar", "", 0, AV_OPT_TYPE_CONST, {.i64=POLAR}, 0, 0, TFLAGS, .unit = "mode" },
86 { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
87 { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
88 { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="400x400"}, 0, 0, FLAGS },
89 { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="400x400"}, 0, 0, FLAGS },
90 { "rc", "set red contrast", OFFSET(contrast[0]), AV_OPT_TYPE_INT, {.i64=40}, 0, 255, TFLAGS },
91 { "gc", "set green contrast", OFFSET(contrast[1]), AV_OPT_TYPE_INT, {.i64=160}, 0, 255, TFLAGS },
92 { "bc", "set blue contrast", OFFSET(contrast[2]), AV_OPT_TYPE_INT, {.i64=80}, 0, 255, TFLAGS },
93 { "ac", "set alpha contrast", OFFSET(contrast[3]), AV_OPT_TYPE_INT, {.i64=255}, 0, 255, TFLAGS },
94 { "rf", "set red fade", OFFSET(fade[0]), AV_OPT_TYPE_INT, {.i64=15}, 0, 255, TFLAGS },
95 { "gf", "set green fade", OFFSET(fade[1]), AV_OPT_TYPE_INT, {.i64=10}, 0, 255, TFLAGS },
96 { "bf", "set blue fade", OFFSET(fade[2]), AV_OPT_TYPE_INT, {.i64=5}, 0, 255, TFLAGS },
97 { "af", "set alpha fade", OFFSET(fade[3]), AV_OPT_TYPE_INT, {.i64=5}, 0, 255, TFLAGS },
98 { "zoom", "set zoom factor", OFFSET(zoom), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 10, TFLAGS },
99 { "draw", "set draw mode", OFFSET(draw), AV_OPT_TYPE_INT, {.i64=DOT}, 0, DRAW_NB-1, TFLAGS, .unit = "draw" },
100 { "dot", "draw dots", 0, AV_OPT_TYPE_CONST, {.i64=DOT} , 0, 0, TFLAGS, .unit = "draw" },
101 { "line", "draw lines", 0, AV_OPT_TYPE_CONST, {.i64=LINE}, 0, 0, TFLAGS, .unit = "draw" },
102 { "aaline","draw anti-aliased lines", 0, AV_OPT_TYPE_CONST, {.i64=AALINE},0,0, TFLAGS, .unit = "draw" },
103 { "scale", "set amplitude scale mode", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=LIN}, 0, SCALE_NB-1, TFLAGS, .unit = "scale" },
104 { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LIN}, 0, 0, TFLAGS, .unit = "scale" },
105 { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, TFLAGS, .unit = "scale" },
106 { "cbrt", "cube root", 0, AV_OPT_TYPE_CONST, {.i64=CBRT}, 0, 0, TFLAGS, .unit = "scale" },
107 { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=LOG}, 0, 0, TFLAGS, .unit = "scale" },
108 { "swap", "swap x axis with y axis", OFFSET(swap), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, TFLAGS },
109 { "mirror", "mirror axis", OFFSET(mirror), AV_OPT_TYPE_INT, {.i64=0}, 0, 3, TFLAGS, .unit = "mirror" },
110 { "none", "no mirror", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, TFLAGS, .unit = "mirror" },
111 { "x", "mirror x", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, TFLAGS, .unit = "mirror" },
112 { "y", "mirror y", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, TFLAGS, .unit = "mirror" },
113 { "xy", "mirror both", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, TFLAGS, .unit = "mirror" },
114 { NULL }
115 };
116
117 AVFILTER_DEFINE_CLASS(avectorscope);
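Options flagged TFLAGS above carry AV_OPT_FLAG_RUNTIME_PARAM, and the filter sets .process_command to ff_filter_process_command (see the AVFilter definition at the end of the file), so these options can be retuned while the graph runs. The sketch below shows one way to create the filter and adjust such an option through the public libavfilter API; the helper name setup_scope() and the chosen option values are illustrative and not part of this file.

#include <libavfilter/avfilter.h>
#include <libavutil/error.h>

/* Illustrative helper (not part of avf_avectorscope.c): create an
 * avectorscope instance inside an existing graph and change one of
 * its runtime ("T"-flagged) options afterwards. */
static int setup_scope(AVFilterGraph *graph, AVFilterContext **out)
{
    const AVFilter *f = avfilter_get_by_name("avectorscope");
    AVFilterContext *ctx;
    int ret;

    if (!f)
        return AVERROR_FILTER_NOT_FOUND;

    ctx = avfilter_graph_alloc_filter(graph, f, "scope");
    if (!ctx)
        return AVERROR(ENOMEM);

    /* Static options such as "size" and "rate" must be set before init. */
    ret = avfilter_init_str(ctx, "mode=lissajous_xy:size=640x640:rate=30");
    if (ret < 0)
        return ret;

    /* Runtime options such as "zoom" may be changed at any time after
     * init, including while the graph is running. */
    ret = avfilter_graph_send_command(graph, "scope", "zoom", "1.5",
                                      NULL, 0, 0);
    if (ret < 0)
        return ret;

    *out = ctx;
    return 0;
}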
118
119 static void draw_dot(AudioVectorScopeContext *s, unsigned x, unsigned y, int value)
120 {
121 const ptrdiff_t linesize = s->outpicref->linesize[0];
122 uint8_t *dst;
123
124 if (s->zoom > 1) {
125 if (y >= s->h || x >= s->w)
126 return;
127 } else {
128 y = FFMIN(y, s->h - 1);
129 x = FFMIN(x, s->w - 1);
130 }
131
132 dst = s->outpicref->data[0] + y * linesize + x * 4;
133 dst[0] = FFMIN(dst[0] + s->contrast[0], value);
134 dst[1] = FFMIN(dst[1] + s->contrast[1], value);
135 dst[2] = FFMIN(dst[2] + s->contrast[2], value);
136 dst[3] = FFMIN(dst[3] + s->contrast[3], value);
137 }
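draw_dot() brightens one RGBA pixel additively but caps each channel at value: with the default contrast of 40/160/80/255 and value 255, the green channel of a repeatedly hit pixel reaches 160 on the first hit and saturates on the second, while red needs seven hits (40, 80, ..., 240, 255). The anti-aliased line drawer further below passes values smaller than 255, so partially covered pixels are capped at a correspondingly lower brightness.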
138
139 static void draw_line(AudioVectorScopeContext *s, int x0, int y0, int x1, int y1)
140 {
141 int dx = FFABS(x1-x0), sx = x0 < x1 ? 1 : -1;
142 int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
143 int err = (dx>dy ? dx : -dy) / 2, e2;
144
145 for (;;) {
146 draw_dot(s, x0, y0, 255);
147
148 if (x0 == x1 && y0 == y1)
149 break;
150
151 e2 = err;
152
153 if (e2 >-dx) {
154 err -= dy;
155 x0 += sx;
156 }
157
158 if (e2 < dy) {
159 err += dx;
160 y0 += sy;
161 }
162 }
163 }
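draw_line() above is the classic integer Bresenham line algorithm: err accumulates the deviation from the ideal line and decides at each iteration whether to step in x, in y, or both, plotting every visited pixel at full intensity.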
164
165 static void draw_aaline(AudioVectorScopeContext *s, int x0, int y0, int x1, int y1)
166 {
167 int sx = x0 < x1 ? 1 : -1, sy = y0 < y1 ? 1 : -1, x2;
168 int dx = FFABS(x1-x0), dy = FFABS(y1-y0), err = dx * dx + dy * dy;
169 int e2 = err == 0 ? 1 : 0xffffff / (dx + dy);
170
171 dx *= e2;
172 dy *= e2;
173 err = dx - dy;
174
175 for (;;) {
176 draw_dot(s, x0, y0, 255-(FFABS(err - dx + dy) >> 16));
177 e2 = err;
178 x2 = x0;
179 if (2 * e2 >= -dx) {
180 if (x0 == x1)
181 break;
182 if (e2 + dy < 0xff0000)
183 draw_dot(s, x0, y0 + sy, 255-((e2 + dy) >> 16));
184 err -= dy;
185 x0 += sx;
186 }
187
188 if (2 * e2 <= dy) {
189 if (y0 == y1)
190 break;
191 if (dx - e2 < 0xff0000)
192 draw_dot(s, x2 + sx, y0, 255-((dx - e2) >> 16));
193 err += dx;
194 y0 += sy;
195 }
196 }
197 }
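draw_aaline() is an anti-aliased variant of the same stepping scheme: the error terms are rescaled by 0xffffff / (dx + dy), so shifting an intermediate error right by 16 bits yields an approximate coverage value in the 0..255 range, which is subtracted from 255 and passed to draw_dot() as the per-pixel intensity cap.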
198
199 static int fade(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
200 {
201 AudioVectorScopeContext *s = ctx->priv;
202 const int linesize = s->outpicref->linesize[0];
203 const int height = s->outpicref->height;
204 const int slice_start = (height * jobnr ) / nb_jobs;
205 const int slice_end = (height * (jobnr+1)) / nb_jobs;
206
207 if (s->fade[0] == 255 && s->fade[1] == 255 && s->fade[2] == 255) {
208 for (int i = slice_start; i < slice_end; i++)
209 memset(s->outpicref->data[0] + i * linesize, 0, s->outpicref->width * 4);
210 return 0;
211 }
212
213 if (s->fade[0] || s->fade[1] || s->fade[2]) {
214 uint8_t *d = s->outpicref->data[0] + slice_start * linesize;
215 for (int i = slice_start; i < slice_end; i++) {
216 for (int j = 0; j < s->w*4; j+=4) {
217 if (d[j+0])
218 d[j+0] = FFMAX(d[j+0] - s->fade[0], 0);
219 if (d[j+1])
220 d[j+1] = FFMAX(d[j+1] - s->fade[1], 0);
221 if (d[j+2])
222 d[j+2] = FFMAX(d[j+2] - s->fade[2], 0);
223 if (d[j+3])
224 d[j+3] = FFMAX(d[j+3] - s->fade[3], 0);
225 }
226 d += linesize;
227 }
228 }
229
230 return 0;
231 }
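fade() is the slice job dispatched through ff_filter_execute() in filter_frame(): before anything new is drawn, the previous picture is darkened per channel by the configured fade values, which produces the phosphor-style trail; all-255 RGB fade values take the shortcut of clearing the slice with memset(). The filter advertises AVFILTER_FLAG_SLICE_THREADS, so slices may run on several threads in parallel.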
232
233 static int query_formats(const AVFilterContext *ctx,
234 AVFilterFormatsConfig **cfg_in,
235 AVFilterFormatsConfig **cfg_out)
236 {
237 AVFilterFormats *formats = NULL;
238 static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE };
239 static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
240 static const AVChannelLayout layouts[] = {
241 AV_CHANNEL_LAYOUT_STEREO,
242 { .nb_channels = 0 },
243 };
244 int ret;
245
246 formats = ff_make_format_list(sample_fmts);
247 if ((ret = ff_formats_ref (formats, &cfg_in[0]->formats )) < 0)
248 return ret;
249
250 ret = ff_set_common_channel_layouts_from_list2(ctx, cfg_in, cfg_out, layouts);
251 if (ret < 0)
252 return ret;
253
254 formats = ff_make_format_list(pix_fmts);
255 if ((ret = ff_formats_ref(formats, &cfg_out[0]->formats)) < 0)
256 return ret;
257
258 return 0;
259 }
260
261 static int config_input(AVFilterLink *inlink)
262 {
263 AVFilterContext *ctx = inlink->dst;
264 AudioVectorScopeContext *s = ctx->priv;
265
266 s->nb_samples = FFMAX(1, av_rescale(inlink->sample_rate, s->frame_rate.den, s->frame_rate.num));
267
268 return 0;
269 }
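config_input() fixes how many audio samples are consumed per video frame: nb_samples = sample_rate * frame_rate.den / frame_rate.num, so 44100 Hz input at the default rate of 25 gives 44100 / 25 = 1764 samples per output picture.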
270
271 static int config_output(AVFilterLink *outlink)
272 {
273 AudioVectorScopeContext *s = outlink->src->priv;
274 FilterLink *l = ff_filter_link(outlink);
275
276 outlink->w = s->w;
277 outlink->h = s->h;
278 outlink->sample_aspect_ratio = (AVRational){1,1};
279 l->frame_rate = s->frame_rate;
280 outlink->time_base = av_inv_q(l->frame_rate);
281
282 s->prev_x = s->hw = s->w / 2;
283 s->prev_y = s->hh = s->mode == POLAR ? s->h - 1 : s->h / 2;
284
285 return 0;
286 }
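config_output() places the origin of the plot: hw and hh are the half-width and half-height used as the screen centre in the Lissajous modes, while polar mode moves the vertical reference to the bottom row (s->h - 1) so that the half-disc drawn by filter_frame() rests on the bottom edge.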
287
288 static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
289 {
290 AVFilterContext *ctx = inlink->dst;
291 AVFilterLink *outlink = ctx->outputs[0];
292 const int16_t *samples = (const int16_t *)insamples->data[0];
293 const float *samplesf = (const float *)insamples->data[0];
294 AudioVectorScopeContext *s = ctx->priv;
295 const int hw = s->hw;
296 const int hh = s->hh;
297 AVFrame *clone;
298 unsigned x, y;
299 unsigned prev_x = s->prev_x, prev_y = s->prev_y;
300 double zoom = s->zoom;
301 int ret;
302
303 if (!s->outpicref || s->outpicref->width != outlink->w ||
304 s->outpicref->height != outlink->h) {
305 av_frame_free(&s->outpicref);
306 s->outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
307 if (!s->outpicref) {
308 av_frame_free(&insamples);
309 return AVERROR(ENOMEM);
310 }
311
312 s->outpicref->sample_aspect_ratio = (AVRational){1,1};
313 for (int i = 0; i < outlink->h; i++)
314 memset(s->outpicref->data[0] + i * s->outpicref->linesize[0], 0, outlink->w * 4);
315 }
316 s->outpicref->pts = av_rescale_q(insamples->pts, inlink->time_base, outlink->time_base);
317 s->outpicref->duration = 1;
318
319 ret = ff_inlink_make_frame_writable(outlink, &s->outpicref);
320 if (ret < 0) {
321 av_frame_free(&insamples);
322 return ret;
323 }
324 ff_filter_execute(ctx, fade, NULL, NULL, FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));
325
326 if (zoom < 1) {
327 float max = 0;
328
329 switch (insamples->format) {
330 case AV_SAMPLE_FMT_S16:
331 for (int i = 0; i < insamples->nb_samples * 2; i++) {
332 float sample = samples[i] / (float)INT16_MAX;
333 max = FFMAX(FFABS(sample), max);
334 }
335 break;
336 case AV_SAMPLE_FMT_FLT:
337 for (int i = 0; i < insamples->nb_samples * 2; i++) {
338 max = FFMAX(FFABS(samplesf[i]), max);
339 }
340 break;
341 default:
342 av_assert2(0);
343 }
344
345 switch (s->scale) {
346 case SQRT:
347 max = sqrtf(max);
348 break;
349 case CBRT:
350 max = cbrtf(max);
351 break;
352 case LOG:
353 max = logf(1 + max) / logf(2);
354 break;
355 }
356
357 if (max > 0.f)
358 zoom = 1. / max;
359 }
360
361 for (int i = 0; i < insamples->nb_samples; i++) {
362 float src[2];
363
364 switch (insamples->format) {
365 case AV_SAMPLE_FMT_S16:
366 src[0] = samples[i*2+0] / (float)INT16_MAX;
367 src[1] = samples[i*2+1] / (float)INT16_MAX;
368 break;
369 case AV_SAMPLE_FMT_FLT:
370 src[0] = samplesf[i*2+0];
371 src[1] = samplesf[i*2+1];
372 break;
373 default:
374 av_assert2(0);
375 }
376
377 switch (s->scale) {
378 case SQRT:
379 src[0] = FFSIGN(src[0]) * sqrtf(FFABS(src[0]));
380 src[1] = FFSIGN(src[1]) * sqrtf(FFABS(src[1]));
381 break;
382 case CBRT:
383 src[0] = FFSIGN(src[0]) * cbrtf(FFABS(src[0]));
384 src[1] = FFSIGN(src[1]) * cbrtf(FFABS(src[1]));
385 break;
386 case LOG:
387 src[0] = FFSIGN(src[0]) * logf(1 + FFABS(src[0])) / logf(2);
388 src[1] = FFSIGN(src[1]) * logf(1 + FFABS(src[1])) / logf(2);
389 break;
390 }
391
392 if (s->mirror & 1)
393 src[0] = -src[0];
394
395 if (s->mirror & 2)
396 src[1] = -src[1];
397
398 if (s->swap)
399 FFSWAP(float, src[0], src[1]);
400
401 if (s->mode == LISSAJOUS) {
402 x = ((src[1] - src[0]) * zoom / 2 + 1) * hw;
403 y = (1.0 - (src[0] + src[1]) * zoom / 2) * hh;
404 } else if (s->mode == LISSAJOUS_XY) {
405 x = (src[1] * zoom + 1) * hw;
406 y = (src[0] * zoom + 1) * hh;
407 } else {
408 float sx, sy, cx, cy;
409
410 sx = src[1] * zoom;
411 sy = src[0] * zoom;
412 cx = sx * sqrtf(1 - 0.5 * sy * sy);
413 cy = sy * sqrtf(1 - 0.5 * sx * sx);
414 x = hw + hw * FFSIGN(cx + cy) * (cx - cy) * .7;
415 y = s->h - s->h * fabsf(cx + cy) * .7;
416 }
417
418 if (s->draw == DOT) {
419 draw_dot(s, x, y, 255);
420 } else if (s->draw == LINE) {
421 draw_line(s, x, y, prev_x, prev_y);
422 } else {
423 draw_aaline(s, x, y, prev_x, prev_y);
424 }
425 prev_x = x;
426 prev_y = y;
427 }
428
429 s->prev_x = x, s->prev_y = y;
430 av_frame_free(&insamples);
431
432 clone = av_frame_clone(s->outpicref);
433 if (!clone)
434 return AVERROR(ENOMEM);
435
436 return ff_filter_frame(outlink, clone);
437 }
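With the default settings (linear scale, swap=1, no mirroring), the per-sample mapping above works out as follows, writing L and R for the left and right input channels:

    lissajous:    x = ((L - R) * zoom / 2 + 1) * hw
                  y = (1 - (L + R) * zoom / 2) * hh
    lissajous_xy: x = (L * zoom + 1) * hw
                  y = (R * zoom + 1) * hh

A mono signal (L == R) therefore traces a vertical line through the centre in the default mode, and fully out-of-phase material (R == -L) a horizontal one; polar mode bends the same pair into a half-disc resting on the bottom edge of the frame. When zoom is set below 1, the block at the top of filter_frame() rescales each frame to its own peak (zoom = 1 / max).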
438
439 static int activate(AVFilterContext *ctx)
440 {
441 AVFilterLink *inlink = ctx->inputs[0];
442 AVFilterLink *outlink = ctx->outputs[0];
443 AudioVectorScopeContext *s = ctx->priv;
444 AVFrame *in;
445 int ret;
446
447 FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
448
449 ret = ff_inlink_consume_samples(inlink, s->nb_samples, s->nb_samples, &in);
450 if (ret < 0)
451 return ret;
452 if (ret > 0)
453 return filter_frame(inlink, in);
454
455 if (ff_inlink_queued_samples(inlink) >= s->nb_samples) {
456 ff_filter_set_ready(ctx, 10);
457 return 0;
458 }
459
460 FF_FILTER_FORWARD_STATUS(inlink, outlink);
461 FF_FILTER_FORWARD_WANTED(outlink, inlink);
462
463 return FFERROR_NOT_READY;
464 }
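activate() follows the standard request-driven pattern: it consumes exactly nb_samples samples per call (so every successful ff_inlink_consume_samples() produces one video frame via filter_frame()), re-arms itself with ff_filter_set_ready() while enough queued input remains, and otherwise forwards status and frame-wanted signals between the two links.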
465
466 static av_cold void uninit(AVFilterContext *ctx)
467 {
468 AudioVectorScopeContext *s = ctx->priv;
469
470 av_frame_free(&s->outpicref);
471 }
472
473 static const AVFilterPad audiovectorscope_inputs[] = {
474 {
475 .name = "default",
476 .type = AVMEDIA_TYPE_AUDIO,
477 .config_props = config_input,
478 },
479 };
480
481 static const AVFilterPad audiovectorscope_outputs[] = {
482 {
483 .name = "default",
484 .type = AVMEDIA_TYPE_VIDEO,
485 .config_props = config_output,
486 },
487 };
488
489 const AVFilter ff_avf_avectorscope = {
490 .name = "avectorscope",
491 .description = NULL_IF_CONFIG_SMALL("Convert input audio to vectorscope video output."),
492 .uninit = uninit,
493 .priv_size = sizeof(AudioVectorScopeContext),
494 .activate = activate,
495 FILTER_INPUTS(audiovectorscope_inputs),
496 FILTER_OUTPUTS(audiovectorscope_outputs),
497 FILTER_QUERY_FUNC2(query_formats),
498 .priv_class = &avectorscope_class,
499 .flags = AVFILTER_FLAG_SLICE_THREADS,
500 .process_command = ff_filter_process_command,
501 };
502