FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavfilter/avf_showwaves.c
Date: 2024-04-23 06:12:56
            Exec   Total   Coverage
Lines:         0     462       0.0%
Functions:     0      32       0.0%
Branches:      0     237       0.0%

Line Branch Exec Source
1 /*
2 * Copyright (c) 2012 Stefano Sabatini
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 /**
22 * @file
23 * audio to video multimedia filter
24 */
25
26 #include "config_components.h"
27
28 #include "libavutil/avassert.h"
29 #include "libavutil/avstring.h"
30 #include "libavutil/channel_layout.h"
31 #include "libavutil/intreadwrite.h"
32 #include "libavutil/mem.h"
33 #include "libavutil/opt.h"
34 #include "libavutil/parseutils.h"
35 #include "avfilter.h"
36 #include "filters.h"
37 #include "formats.h"
38 #include "audio.h"
39 #include "video.h"
40 #include "internal.h"
41
42 enum ShowWavesMode {
43 MODE_POINT,
44 MODE_LINE,
45 MODE_P2P,
46 MODE_CENTERED_LINE,
47 MODE_NB,
48 };
49
50 enum ShowWavesScale {
51 SCALE_LIN,
52 SCALE_LOG,
53 SCALE_SQRT,
54 SCALE_CBRT,
55 SCALE_NB,
56 };
57
58 enum ShowWavesDrawMode {
59 DRAW_SCALE,
60 DRAW_FULL,
61 DRAW_NB,
62 };
63
64 enum ShowWavesFilterMode {
65 FILTER_AVERAGE,
66 FILTER_PEAK,
67 FILTER_NB,
68 };
69
70 struct frame_node {
71 AVFrame *frame;
72 struct frame_node *next;
73 };
74
75 typedef struct ShowWavesContext {
76 const AVClass *class;
77 int w, h;
78 AVRational rate;
79 char *colors;
80 int buf_idx;
81 int16_t *buf_idy; /* y coordinate of previous sample for each channel */
82 int16_t *history;
83 int history_nb_samples;
84 int history_index;
85 AVFrame *outpicref;
86 AVRational n, q, c;
87 int pixstep;
88 int mode; ///< ShowWavesMode
89 int scale; ///< ShowWavesScale
90 int draw_mode; ///< ShowWavesDrawMode
91 int split_channels;
92 int filter_mode;
93 uint8_t *fg;
94
95 int (*get_h)(int16_t sample, int height);
96 void (*draw_sample)(uint8_t *buf, int height, int linesize,
97 int16_t *prev_y, const uint8_t color[4], int h);
98
99 /* single picture */
100 int single_pic;
101 struct frame_node *audio_frames;
102 struct frame_node *last_frame;
103 int64_t total_samples;
104 int64_t *sum; /* abs sum of the samples per channel */
105 } ShowWavesContext;
106
107 #define OFFSET(x) offsetof(ShowWavesContext, x)
108 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
109
110 static const AVOption showwaves_options[] = {
111 { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
112 { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
113 { "mode", "select display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, .flags=FLAGS, .unit="mode"},
114 { "point", "draw a point for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT}, .flags=FLAGS, .unit="mode"},
115 { "line", "draw a line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE}, .flags=FLAGS, .unit="mode"},
116 { "p2p", "draw a line between samples", 0, AV_OPT_TYPE_CONST, {.i64=MODE_P2P}, .flags=FLAGS, .unit="mode"},
117 { "cline", "draw a centered line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_CENTERED_LINE}, .flags=FLAGS, .unit="mode"},
118 { "n", "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_RATIONAL, {.i64 = 0}, 0, INT_MAX, FLAGS },
119 { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
120 { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
121 { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
122 { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
123 { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
124 { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN}, .flags=FLAGS, .unit="scale"},
125 { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG}, .flags=FLAGS, .unit="scale"},
126 { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_SQRT}, .flags=FLAGS, .unit="scale"},
127 { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_CBRT}, .flags=FLAGS, .unit="scale"},
128 { "draw", "set draw mode", OFFSET(draw_mode), AV_OPT_TYPE_INT, {.i64 = DRAW_SCALE}, 0, DRAW_NB-1, FLAGS, .unit="draw" },
129 { "scale", "scale pixel values for each drawn sample", 0, AV_OPT_TYPE_CONST, {.i64=DRAW_SCALE}, .flags=FLAGS, .unit="draw"},
130 { "full", "draw every pixel for sample directly", 0, AV_OPT_TYPE_CONST, {.i64=DRAW_FULL}, .flags=FLAGS, .unit="draw"},
131 { NULL }
132 };
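/* Illustrative invocation (a sketch only; "input.wav" and "waves.mp4" are
 * placeholder names). Any of the options above can be appended as key=value
 * pairs separated by ':':
 *
 *   ffmpeg -i input.wav \
 *          -filter_complex "[0:a]showwaves=s=600x240:mode=line:colors=white[v]" \
 *          -map "[v]" -frames:v 250 waves.mp4
 */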
133
134 AVFILTER_DEFINE_CLASS(showwaves);
135
136 static av_cold void uninit(AVFilterContext *ctx)
137 {
138 ShowWavesContext *showwaves = ctx->priv;
139
140 av_frame_free(&showwaves->outpicref);
141 av_freep(&showwaves->buf_idy);
142 av_freep(&showwaves->history);
143 av_freep(&showwaves->fg);
144
145 if (showwaves->single_pic) {
146 struct frame_node *node = showwaves->audio_frames;
147 while (node) {
148 struct frame_node *tmp = node;
149
150 node = node->next;
151 av_frame_free(&tmp->frame);
152 av_freep(&tmp);
153 }
154 av_freep(&showwaves->sum);
155 showwaves->last_frame = NULL;
156 }
157 }
158
159 static int query_formats(AVFilterContext *ctx)
160 {
161 AVFilterFormats *formats = NULL;
162 AVFilterChannelLayouts *layouts = NULL;
163 AVFilterLink *inlink = ctx->inputs[0];
164 AVFilterLink *outlink = ctx->outputs[0];
165 static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
166 static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
167 int ret;
168
169 /* set input audio formats */
170 formats = ff_make_format_list(sample_fmts);
171 if ((ret = ff_formats_ref(formats, &inlink->outcfg.formats)) < 0)
172 return ret;
173
174 layouts = ff_all_channel_layouts();
175 if ((ret = ff_channel_layouts_ref(layouts, &inlink->outcfg.channel_layouts)) < 0)
176 return ret;
177
178 formats = ff_all_samplerates();
179 if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0)
180 return ret;
181
182 /* set output video format */
183 formats = ff_make_format_list(pix_fmts);
184 if ((ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
185 return ret;
186
187 return 0;
188 }
189
190 static int get_lin_h(int16_t sample, int height)
191 {
192 return height/2 - av_rescale(sample, height/2, INT16_MAX);
193 }
194
195 static int get_lin_h2(int16_t sample, int height)
196 {
197 return av_rescale(FFABS(sample), height, INT16_MAX);
198 }
199
200 static int get_log_h(int16_t sample, int height)
201 {
202 return height/2 - FFSIGN(sample) * (log10(1 + FFABS(sample)) * (height/2) / log10(1 + INT16_MAX));
203 }
204
205 static int get_log_h2(int16_t sample, int height)
206 {
207 return log10(1 + FFABS(sample)) * height / log10(1 + INT16_MAX);
208 }
209
210 static int get_sqrt_h(int16_t sample, int height)
211 {
212 return height/2 - FFSIGN(sample) * (sqrt(FFABS(sample)) * (height/2) / sqrt(INT16_MAX));
213 }
214
215 static int get_sqrt_h2(int16_t sample, int height)
216 {
217 return sqrt(FFABS(sample)) * height / sqrt(INT16_MAX);
218 }
219
220 static int get_cbrt_h(int16_t sample, int height)
221 {
222 return height/2 - FFSIGN(sample) * (cbrt(FFABS(sample)) * (height/2) / cbrt(INT16_MAX));
223 }
224
225 static int get_cbrt_h2(int16_t sample, int height)
226 {
227 return cbrt(FFABS(sample)) * height / cbrt(INT16_MAX);
228 }
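/* Worked example (assumed values): with height = 240 and a half-scale sample
 * of 16384, get_lin_h() returns 120 - 16384*120/32767, about 60, i.e. the
 * point is drawn a quarter of the way down from the top, while get_log_h()
 * returns 120 - log10(16385)/log10(32768)*120, about 8, pushing moderate
 * amplitudes much closer to the frame edge. A sample of 0 maps to the
 * vertical centre (height/2) in the signed variants and to 0 pixels in the
 * *_h2 variants used by the centered-line mode. */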
229
230 static void draw_sample_point_rgba_scale(uint8_t *buf, int height, int linesize,
231 int16_t *prev_y,
232 const uint8_t color[4], int h)
233 {
234 if (h >= 0 && h < height) {
235 buf[h * linesize + 0] += color[0];
236 buf[h * linesize + 1] += color[1];
237 buf[h * linesize + 2] += color[2];
238 buf[h * linesize + 3] += color[3];
239 }
240 }
241
242 static void draw_sample_point_rgba_full(uint8_t *buf, int height, int linesize,
243 int16_t *prev_y,
244 const uint8_t color[4], int h)
245 {
246 uint32_t clr = AV_RN32(color);
247 if (h >= 0 && h < height)
248 AV_WN32(buf + h * linesize, clr);
249 }
250
251 static void draw_sample_line_rgba_scale(uint8_t *buf, int height, int linesize,
252 int16_t *prev_y,
253 const uint8_t color[4], int h)
254 {
255 int start = height/2;
256 int end = av_clip(h, 0, height-1);
257 uint8_t *bufk;
258 if (start > end)
259 FFSWAP(int16_t, start, end);
260 bufk = buf + start * linesize;
261 for (int k = start; k < end; k++, bufk += linesize) {
262 bufk[0] += color[0];
263 bufk[1] += color[1];
264 bufk[2] += color[2];
265 bufk[3] += color[3];
266 }
267 }
268
269 static void draw_sample_line_rgba_full(uint8_t *buf, int height, int linesize,
270 int16_t *prev_y,
271 const uint8_t color[4], int h)
272 {
273 int start = height/2;
274 int end = av_clip(h, 0, height-1);
275 uint32_t clr = AV_RN32(color);
276 uint8_t *bufk;
277 if (start > end)
278 FFSWAP(int16_t, start, end);
279 bufk = buf + start * linesize;
280 for (int k = start; k < end; k++, bufk += linesize)
281 AV_WN32(bufk, clr);
282 }
283
284 static void draw_sample_p2p_rgba_scale(uint8_t *buf, int height, int linesize,
285 int16_t *prev_y,
286 const uint8_t color[4], int h)
287 {
288 if (h >= 0 && h < height) {
289 buf[h * linesize + 0] += color[0];
290 buf[h * linesize + 1] += color[1];
291 buf[h * linesize + 2] += color[2];
292 buf[h * linesize + 3] += color[3];
293 if (*prev_y && h != *prev_y) {
294 int start = *prev_y;
295 uint8_t *bufk;
296 int end = av_clip(h, 0, height-1);
297 if (start > end)
298 FFSWAP(int16_t, start, end);
299 bufk = buf + (start + 1) * linesize;
300 for (int k = start + 1; k < end; k++, bufk += linesize) {
301 bufk[0] += color[0];
302 bufk[1] += color[1];
303 bufk[2] += color[2];
304 bufk[3] += color[3];
305 }
306 }
307 }
308 *prev_y = h;
309 }
310
311 static void draw_sample_p2p_rgba_full(uint8_t *buf, int height, int linesize,
312 int16_t *prev_y,
313 const uint8_t color[4], int h)
314 {
315 uint32_t clr = AV_RN32(color);
316 if (h >= 0 && h < height) {
317 AV_WN32(buf + h * linesize, clr);
318 if (*prev_y && h != *prev_y) {
319 int start = *prev_y;
320 uint8_t *bufk;
321 int end = av_clip(h, 0, height-1);
322 if (start > end)
323 FFSWAP(int16_t, start, end);
324 bufk = buf + (start + 1) * linesize;
325 for (int k = start + 1; k < end; k++, bufk += linesize)
326 AV_WN32(bufk, clr);
327 }
328 }
329 *prev_y = h;
330 }
331
332 static void draw_sample_cline_rgba_scale(uint8_t *buf, int height, int linesize,
333 int16_t *prev_y,
334 const uint8_t color[4], int h)
335 {
336 const int start = (height - h) / 2;
337 const int end = start + h;
338 uint8_t *bufk = buf + start * linesize;
339 for (int k = start; k < end; k++, bufk += linesize) {
340 bufk[0] += color[0];
341 bufk[1] += color[1];
342 bufk[2] += color[2];
343 bufk[3] += color[3];
344 }
345 }
346
347 static void draw_sample_cline_rgba_full(uint8_t *buf, int height, int linesize,
348 int16_t *prev_y,
349 const uint8_t color[4], int h)
350 {
351 uint32_t clr = AV_RN32(color);
352 const int start = (height - h) / 2;
353 const int end = start + h;
354 uint8_t *bufk = buf + start * linesize;
355 for (int k = start; k < end; k++, bufk += linesize)
356 AV_WN32(bufk, clr);
357 }
358
359 static void draw_sample_point_gray(uint8_t *buf, int height, int linesize,
360 int16_t *prev_y,
361 const uint8_t color[4], int h)
362 {
363 if (h >= 0 && h < height)
364 buf[h * linesize] += color[0];
365 }
366
367 static void draw_sample_line_gray(uint8_t *buf, int height, int linesize,
368 int16_t *prev_y,
369 const uint8_t color[4], int h)
370 {
371 int k;
372 int start = height/2;
373 int end = av_clip(h, 0, height-1);
374 if (start > end)
375 FFSWAP(int16_t, start, end);
376 for (k = start; k < end; k++)
377 buf[k * linesize] += color[0];
378 }
379
380 static void draw_sample_p2p_gray(uint8_t *buf, int height, int linesize,
381 int16_t *prev_y,
382 const uint8_t color[4], int h)
383 {
384 int k;
385 if (h >= 0 && h < height) {
386 buf[h * linesize] += color[0];
387 if (*prev_y && h != *prev_y) {
388 int start = *prev_y;
389 int end = av_clip(h, 0, height-1);
390 if (start > end)
391 FFSWAP(int16_t, start, end);
392 for (k = start + 1; k < end; k++)
393 buf[k * linesize] += color[0];
394 }
395 }
396 *prev_y = h;
397 }
398
399 static void draw_sample_cline_gray(uint8_t *buf, int height, int linesize,
400 int16_t *prev_y,
401 const uint8_t color[4], int h)
402 {
403 int k;
404 const int start = (height - h) / 2;
405 const int end = start + h;
406 for (k = start; k < end; k++)
407 buf[k * linesize] += color[0];
408 }
409
410 static int config_output(AVFilterLink *outlink)
411 {
412 AVFilterContext *ctx = outlink->src;
413 AVFilterLink *inlink = ctx->inputs[0];
414 ShowWavesContext *showwaves = ctx->priv;
415 int nb_channels = inlink->ch_layout.nb_channels;
416 char *colors, *saveptr = NULL;
417 uint8_t x;
418 int ch;
419
420 showwaves->q = av_make_q(0, 1);
421 showwaves->c = av_make_q(0, 1);
422
423 if (showwaves->single_pic) {
424 showwaves->n = av_make_q(1, 1);
425 outlink->frame_rate = av_make_q(1, 1);
426 } else {
427 if (!showwaves->n.num || !showwaves->n.den) {
428 showwaves->n = av_mul_q(av_make_q(inlink->sample_rate,
429 showwaves->w), av_inv_q(showwaves->rate));
430 outlink->frame_rate = showwaves->rate;
431 } else {
432 outlink->frame_rate = av_div_q(av_make_q(inlink->sample_rate, showwaves->w), showwaves->n);
433 }
434 }
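    /* Worked example (assumed input): with the default size 600x240, rate 25
     * and a 44100 Hz stream, n = (44100/600) * (1/25) = 147/50, i.e. 2.94
     * input samples are folded into each output column. If "n" is given
     * explicitly, the frame rate is derived the other way round:
     * frame_rate = sample_rate / (w * n). */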
435
436 showwaves->buf_idx = 0;
437 if (!FF_ALLOCZ_TYPED_ARRAY(showwaves->buf_idy, nb_channels)) {
438 av_log(ctx, AV_LOG_ERROR, "Could not allocate showwaves buffer\n");
439 return AVERROR(ENOMEM);
440 }
441
442 showwaves->history_nb_samples = av_rescale(showwaves->w * nb_channels * 2,
443 showwaves->n.num, showwaves->n.den);
444 if (showwaves->history_nb_samples <= 0)
445 return AVERROR(EINVAL);
446 showwaves->history = av_calloc(showwaves->history_nb_samples,
447 sizeof(*showwaves->history));
448 if (!showwaves->history)
449 return AVERROR(ENOMEM);
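    /* Sizing note (same assumed numbers as above): the history ring buffer
     * holds two output frames' worth of samples across all channels, e.g.
     * av_rescale(600 * 2 * 2, 147, 50) = 7056 int16_t entries for a stereo
     * 44100 Hz input. */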
450
451 outlink->time_base = av_inv_q(outlink->frame_rate);
452 outlink->w = showwaves->w;
453 outlink->h = showwaves->h;
454 outlink->sample_aspect_ratio = (AVRational){1,1};
455
456 av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d r:%f n:%f\n",
457 showwaves->w, showwaves->h, av_q2d(outlink->frame_rate), av_q2d(showwaves->n));
458
459 switch (outlink->format) {
460 case AV_PIX_FMT_GRAY8:
461 switch (showwaves->mode) {
462 case MODE_POINT: showwaves->draw_sample = draw_sample_point_gray; break;
463 case MODE_LINE: showwaves->draw_sample = draw_sample_line_gray; break;
464 case MODE_P2P: showwaves->draw_sample = draw_sample_p2p_gray; break;
465 case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline_gray; break;
466 default:
467 return AVERROR_BUG;
468 }
469 showwaves->pixstep = 1;
470 break;
471 case AV_PIX_FMT_RGBA:
472 switch (showwaves->mode) {
473 case MODE_POINT: showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_point_rgba_scale : draw_sample_point_rgba_full; break;
474 case MODE_LINE: showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_line_rgba_scale : draw_sample_line_rgba_full; break;
475 case MODE_P2P: showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_p2p_rgba_scale : draw_sample_p2p_rgba_full; break;
476 case MODE_CENTERED_LINE: showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_cline_rgba_scale : draw_sample_cline_rgba_full; break;
477 default:
478 return AVERROR_BUG;
479 }
480 showwaves->pixstep = 4;
481 break;
482 }
483
484 switch (showwaves->scale) {
485 case SCALE_LIN:
486 switch (showwaves->mode) {
487 case MODE_POINT:
488 case MODE_LINE:
489 case MODE_P2P: showwaves->get_h = get_lin_h; break;
490 case MODE_CENTERED_LINE: showwaves->get_h = get_lin_h2; break;
491 default:
492 return AVERROR_BUG;
493 }
494 break;
495 case SCALE_LOG:
496 switch (showwaves->mode) {
497 case MODE_POINT:
498 case MODE_LINE:
499 case MODE_P2P: showwaves->get_h = get_log_h; break;
500 case MODE_CENTERED_LINE: showwaves->get_h = get_log_h2; break;
501 default:
502 return AVERROR_BUG;
503 }
504 break;
505 case SCALE_SQRT:
506 switch (showwaves->mode) {
507 case MODE_POINT:
508 case MODE_LINE:
509 case MODE_P2P: showwaves->get_h = get_sqrt_h; break;
510 case MODE_CENTERED_LINE: showwaves->get_h = get_sqrt_h2; break;
511 default:
512 return AVERROR_BUG;
513 }
514 break;
515 case SCALE_CBRT:
516 switch (showwaves->mode) {
517 case MODE_POINT:
518 case MODE_LINE:
519 case MODE_P2P: showwaves->get_h = get_cbrt_h; break;
520 case MODE_CENTERED_LINE: showwaves->get_h = get_cbrt_h2; break;
521 default:
522 return AVERROR_BUG;
523 }
524 break;
525 }
526
527 showwaves->fg = av_malloc_array(nb_channels, 4 * sizeof(*showwaves->fg));
528 if (!showwaves->fg)
529 return AVERROR(ENOMEM);
530
531 colors = av_strdup(showwaves->colors);
532 if (!colors)
533 return AVERROR(ENOMEM);
534
535 if (showwaves->draw_mode == DRAW_SCALE) {
536 /* multiplication factor, pre-computed to avoid in-loop divisions */
537 x = (showwaves->n.den * 255) / ((showwaves->split_channels ? 1 : nb_channels) * showwaves->n.num);
538 } else {
539 x = 255;
540 }
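    /* Worked example (assumed values): with n = 147/50 and 2 non-split
     * channels in DRAW_SCALE mode, x = (50 * 255) / (2 * 147) = 43, so each
     * of the roughly 2.94 samples per channel landing on a column adds 43 to
     * every colour component and a fully covered column accumulates to
     * roughly 255. DRAW_FULL skips the scaling and writes the colour at full
     * strength (x = 255). */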
541 if (outlink->format == AV_PIX_FMT_RGBA) {
542 uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
543
544 for (ch = 0; ch < nb_channels; ch++) {
545 char *color;
546
547 color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
548 if (color)
549 av_parse_color(fg, color, -1, ctx);
550 showwaves->fg[4*ch + 0] = fg[0] * x / 255.;
551 showwaves->fg[4*ch + 1] = fg[1] * x / 255.;
552 showwaves->fg[4*ch + 2] = fg[2] * x / 255.;
553 showwaves->fg[4*ch + 3] = fg[3] * x / 255.;
554 }
555 } else {
556 for (ch = 0; ch < nb_channels; ch++)
557 showwaves->fg[4 * ch + 0] = x;
558 }
559 av_free(colors);
560
561 return 0;
562 }
563
564 inline static int push_frame(AVFilterLink *outlink, int i, int64_t pts)
565 {
566 AVFilterContext *ctx = outlink->src;
567 AVFilterLink *inlink = ctx->inputs[0];
568 ShowWavesContext *showwaves = outlink->src->priv;
569 int nb_channels = inlink->ch_layout.nb_channels;
570 int ret;
571
572 showwaves->outpicref->duration = 1;
573 showwaves->outpicref->pts = av_rescale_q(pts + i,
574 inlink->time_base,
575 outlink->time_base);
576
577 ret = ff_filter_frame(outlink, showwaves->outpicref);
578 showwaves->outpicref = NULL;
579 showwaves->buf_idx = 0;
580 for (int i = 0; i < nb_channels; i++)
581 showwaves->buf_idy[i] = 0;
582 return ret;
583 }
584
585 static int push_single_pic(AVFilterLink *outlink)
586 {
587 AVFilterContext *ctx = outlink->src;
588 AVFilterLink *inlink = ctx->inputs[0];
589 ShowWavesContext *showwaves = ctx->priv;
590 int64_t n = 0, column_max_samples = showwaves->total_samples / outlink->w;
591 int64_t remaining_samples = showwaves->total_samples - (column_max_samples * outlink->w);
592 int64_t last_column_samples = column_max_samples + remaining_samples;
593 AVFrame *out = showwaves->outpicref;
594 struct frame_node *node;
595 const int nb_channels = inlink->ch_layout.nb_channels;
596 const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;
597 const int linesize = out->linesize[0];
598 const int pixstep = showwaves->pixstep;
599 int col = 0;
600 int64_t *sum = showwaves->sum;
601
602 if (column_max_samples == 0) {
603 av_log(ctx, AV_LOG_ERROR, "Too few samples\n");
604 return AVERROR(EINVAL);
605 }
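    /* Worked example (assumed input): a 60 s stereo clip at 44100 Hz yields
     * total_samples = 2646000, so with the default width of 600 each column
     * averages (or, with filter=peak, takes the peak of) 4410 samples per
     * channel; the last column also absorbs the division remainder. */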
606
607 av_log(ctx, AV_LOG_DEBUG, "Create frame averaging %"PRId64" samples per column\n", column_max_samples);
608
609 memset(sum, 0, nb_channels * sizeof(*sum));
610
611 for (node = showwaves->audio_frames; node; node = node->next) {
612 int i;
613 const AVFrame *frame = node->frame;
614 const int16_t *p = (const int16_t *)frame->data[0];
615
616 for (i = 0; i < frame->nb_samples; i++) {
617 int64_t max_samples = col == outlink->w - 1 ? last_column_samples: column_max_samples;
618 int ch;
619
620 switch (showwaves->filter_mode) {
621 case FILTER_AVERAGE:
622 for (ch = 0; ch < nb_channels; ch++)
623 sum[ch] += abs(p[ch + i*nb_channels]);
624 break;
625 case FILTER_PEAK:
626 for (ch = 0; ch < nb_channels; ch++)
627 sum[ch] = FFMAX(sum[ch], abs(p[ch + i*nb_channels]));
628 break;
629 }
630
631 n++;
632 if (n == max_samples) {
633 for (ch = 0; ch < nb_channels; ch++) {
634 int16_t sample = sum[ch] / (showwaves->filter_mode == FILTER_AVERAGE ? max_samples : 1);
635 uint8_t *buf = out->data[0] + col * pixstep;
636 int h;
637
638 if (showwaves->split_channels)
639 buf += ch*ch_height*linesize;
640 av_assert0(col < outlink->w);
641 h = showwaves->get_h(sample, ch_height);
642 showwaves->draw_sample(buf, ch_height, linesize, &showwaves->buf_idy[ch], &showwaves->fg[ch * 4], h);
643 sum[ch] = 0;
644 }
645 col++;
646 n = 0;
647 }
648 }
649 }
650
651 return push_frame(outlink, 0, 0);
652 }
653
654
655 static int request_frame(AVFilterLink *outlink)
656 {
657 ShowWavesContext *showwaves = outlink->src->priv;
658 AVFilterLink *inlink = outlink->src->inputs[0];
659 int ret;
660
661 ret = ff_request_frame(inlink);
662 if (ret == AVERROR_EOF && showwaves->outpicref) {
663 push_single_pic(outlink);
664 }
665
666 return ret;
667 }
668
669 static int alloc_out_frame(ShowWavesContext *showwaves,
670 AVFilterLink *outlink)
671 {
672 if (!showwaves->outpicref) {
673 AVFrame *out = showwaves->outpicref =
674 ff_get_video_buffer(outlink, outlink->w, outlink->h);
675 if (!out)
676 return AVERROR(ENOMEM);
677 out->width = outlink->w;
678 out->height = outlink->h;
679 for (int j = 0; j < outlink->h; j++)
680 memset(out->data[0] + j*out->linesize[0], 0, outlink->w * showwaves->pixstep);
681 }
682 return 0;
683 }
684
685 static av_cold int init(AVFilterContext *ctx)
686 {
687 ShowWavesContext *showwaves = ctx->priv;
688
689 if (!strcmp(ctx->filter->name, "showwavespic")) {
690 showwaves->single_pic = 1;
691 showwaves->mode = MODE_CENTERED_LINE;
692 }
693
694 return 0;
695 }
696
697 #if CONFIG_SHOWWAVES_FILTER
698
699 static int showwaves_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
700 {
701 AVFilterContext *ctx = inlink->dst;
702 AVFilterLink *outlink = ctx->outputs[0];
703 ShowWavesContext *showwaves = ctx->priv;
704 const int nb_samples = insamples->nb_samples;
705 AVFrame *outpicref = showwaves->outpicref;
706 const int16_t *p = (const int16_t *)insamples->data[0];
707 int16_t *history = showwaves->history;
708 const int nb_channels = inlink->ch_layout.nb_channels;
709 int i, j, ret = 0, linesize;
710 const int pixstep = showwaves->pixstep;
711 const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;
712 const int history_nb_samples = showwaves->history_nb_samples;
713 const int split_channels = showwaves->split_channels;
714 const AVRational i_n = av_inv_q(showwaves->n);
715 const AVRational u_q = av_make_q(1, 1);
716 const AVRational z_q = av_make_q(0, 1);
717 int16_t *buf_idy = showwaves->buf_idy;
718 int idx = showwaves->history_index;
719 int buf_idx = showwaves->buf_idx;
720 const uint8_t *fg = showwaves->fg;
721 const int w = showwaves->w;
722 uint8_t *dst;
723
724 for (int n = 0; n < nb_samples * nb_channels; n++) {
725 history[idx++] = p[n];
726 if (idx >= history_nb_samples)
727 idx = 0;
728 }
729 showwaves->history_index = idx;
730
731 ret = alloc_out_frame(showwaves, outlink);
732 if (ret < 0)
733 goto end;
734 outpicref = showwaves->outpicref;
735 linesize = outpicref->linesize[0];
736
737 /* draw data in the buffer */
738 dst = outpicref->data[0];
739 for (i = 0; i < history_nb_samples; i++) {
740 for (j = 0; j < nb_channels; j++) {
741 uint8_t *buf = dst + buf_idx * pixstep;
742 int h;
743
744 if (split_channels)
745 buf += j*ch_height*linesize;
746 h = showwaves->get_h(history[idx++], ch_height);
747 if (idx >= history_nb_samples)
748 idx = 0;
749 showwaves->draw_sample(buf, ch_height, linesize,
750 &buf_idy[j], &fg[j * 4], h);
751 }
752
753 showwaves->c = av_add_q(showwaves->c, i_n);
754 if (av_cmp_q(showwaves->c, u_q) >= 0) {
755 showwaves->c = z_q;
756 buf_idx++;
757 }
758 if (buf_idx == w)
759 break;
760 }
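    /* Column pacing note: because showwaves->c is reset to zero rather than
     * reduced by one, each output column consumes ceil(n) history samples
     * per channel (3 in the 147/50 example above) before buf_idx advances. */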
761
762 showwaves->buf_idx = buf_idx;
763
764 if ((ret = push_frame(outlink, history_nb_samples - i - 1, insamples->pts)) < 0)
765 goto end;
766 outpicref = showwaves->outpicref;
767 end:
768 av_frame_free(&insamples);
769 return ret;
770 }
771
772 static int activate(AVFilterContext *ctx)
773 {
774 AVFilterLink *inlink = ctx->inputs[0];
775 AVFilterLink *outlink = ctx->outputs[0];
776 ShowWavesContext *showwaves = ctx->priv;
777 AVRational q;
778 AVFrame *in;
779 int nb_samples;
780 int ret;
781
782 FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
783
784 q = av_add_q(showwaves->q, av_mul_q(av_make_q(outlink->w, 1), showwaves->n));
785 nb_samples = (q.num + (q.den / 2)) / q.den;
786 ret = ff_inlink_consume_samples(inlink, nb_samples, nb_samples, &in);
787 if (ret < 0)
788 return ret;
789 if (ret > 0) {
790 showwaves->q = av_sub_q(q, av_make_q(nb_samples, 1));
791 return showwaves_filter_frame(inlink, in);
792 }
793
794 FF_FILTER_FORWARD_STATUS(inlink, outlink);
795 FF_FILTER_FORWARD_WANTED(outlink, inlink);
796
797 return FFERROR_NOT_READY;
798 }
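/* Worked example (assumed values): with w = 600 and n = 147/50 the activation
 * above requests round(600 * 147/50) = 1764 samples per video frame (exactly
 * 44100 Hz at 25 fps), and the rounding remainder is carried over in
 * showwaves->q for the next activation. */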
799
800 static const AVFilterPad showwaves_outputs[] = {
801 {
802 .name = "default",
803 .type = AVMEDIA_TYPE_VIDEO,
804 .config_props = config_output,
805 },
806 };
807
808 const AVFilter ff_avf_showwaves = {
809 .name = "showwaves",
810 .description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
811 .init = init,
812 .uninit = uninit,
813 .priv_size = sizeof(ShowWavesContext),
814 FILTER_INPUTS(ff_audio_default_filterpad),
815 .activate = activate,
816 FILTER_OUTPUTS(showwaves_outputs),
817 FILTER_QUERY_FUNC(query_formats),
818 .priv_class = &showwaves_class,
819 };
820
821 #endif // CONFIG_SHOWWAVES_FILTER
822
823 #if CONFIG_SHOWWAVESPIC_FILTER
824
825 #define OFFSET(x) offsetof(ShowWavesContext, x)
826 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
827
828 static const AVOption showwavespic_options[] = {
829 { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
830 { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
831 { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
832 { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
833 { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
834 { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN}, .flags=FLAGS, .unit="scale"},
835 { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG}, .flags=FLAGS, .unit="scale"},
836 { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_SQRT}, .flags=FLAGS, .unit="scale"},
837 { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_CBRT}, .flags=FLAGS, .unit="scale"},
838 { "draw", "set draw mode", OFFSET(draw_mode), AV_OPT_TYPE_INT, {.i64 = DRAW_SCALE}, 0, DRAW_NB-1, FLAGS, .unit="draw" },
839 { "scale", "scale pixel values for each drawn sample", 0, AV_OPT_TYPE_CONST, {.i64=DRAW_SCALE}, .flags=FLAGS, .unit="draw"},
840 { "full", "draw every pixel for sample directly", 0, AV_OPT_TYPE_CONST, {.i64=DRAW_FULL}, .flags=FLAGS, .unit="draw"},
841 { "filter", "set filter mode", OFFSET(filter_mode), AV_OPT_TYPE_INT, {.i64 = FILTER_AVERAGE}, 0, FILTER_NB-1, FLAGS, .unit="filter" },
842 { "average", "use average samples", 0, AV_OPT_TYPE_CONST, {.i64=FILTER_AVERAGE}, .flags=FLAGS, .unit="filter"},
843 { "peak", "use peak samples", 0, AV_OPT_TYPE_CONST, {.i64=FILTER_PEAK}, .flags=FLAGS, .unit="filter"},
844 { NULL }
845 };
846
847 AVFILTER_DEFINE_CLASS(showwavespic);
848
849 static int showwavespic_config_input(AVFilterLink *inlink)
850 {
851 AVFilterContext *ctx = inlink->dst;
852 ShowWavesContext *showwaves = ctx->priv;
853
854 if (showwaves->single_pic) {
855 showwaves->sum = av_calloc(inlink->ch_layout.nb_channels, sizeof(*showwaves->sum));
856 if (!showwaves->sum)
857 return AVERROR(ENOMEM);
858 }
859
860 return 0;
861 }
862
863 static int showwavespic_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
864 {
865 AVFilterContext *ctx = inlink->dst;
866 AVFilterLink *outlink = ctx->outputs[0];
867 ShowWavesContext *showwaves = ctx->priv;
868 int ret = 0;
869
870 if (showwaves->single_pic) {
871 struct frame_node *f;
872
873 ret = alloc_out_frame(showwaves, outlink);
874 if (ret < 0)
875 goto end;
876
877 /* queue the audio frame */
878 f = av_malloc(sizeof(*f));
879 if (!f) {
880 ret = AVERROR(ENOMEM);
881 goto end;
882 }
883 f->frame = insamples;
884 f->next = NULL;
885 if (!showwaves->last_frame) {
886 showwaves->audio_frames =
887 showwaves->last_frame = f;
888 } else {
889 showwaves->last_frame->next = f;
890 showwaves->last_frame = f;
891 }
892 showwaves->total_samples += insamples->nb_samples;
893
894 return 0;
895 }
896
897 end:
898 av_frame_free(&insamples);
899 return ret;
900 }
901
902 static const AVFilterPad showwavespic_inputs[] = {
903 {
904 .name = "default",
905 .type = AVMEDIA_TYPE_AUDIO,
906 .config_props = showwavespic_config_input,
907 .filter_frame = showwavespic_filter_frame,
908 },
909 };
910
911 static const AVFilterPad showwavespic_outputs[] = {
912 {
913 .name = "default",
914 .type = AVMEDIA_TYPE_VIDEO,
915 .config_props = config_output,
916 .request_frame = request_frame,
917 },
918 };
919
920 const AVFilter ff_avf_showwavespic = {
921 .name = "showwavespic",
922 .description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output single picture."),
923 .init = init,
924 .uninit = uninit,
925 .priv_size = sizeof(ShowWavesContext),
926 FILTER_INPUTS(showwavespic_inputs),
927 FILTER_OUTPUTS(showwavespic_outputs),
928 FILTER_QUERY_FUNC(query_formats),
929 .priv_class = &showwavespic_class,
930 };
931
932 #endif // CONFIG_SHOWWAVESPIC_FILTER
933