FFmpeg coverage


Directory: ../../../ffmpeg/
File: src/libavfilter/avf_a3dscope.c
Date: 2024-04-18 10:05:09
           Exec   Total   Coverage
Lines:        0     152       0.0%
Functions:    0      12       0.0%
Branches:     0      42       0.0%

Line Branch Exec Source
1 /*
2 * Copyright (c) 2022 Paul B Mahol
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 #include "libavutil/avassert.h"
22 #include "libavutil/channel_layout.h"
23 #include "libavutil/opt.h"
24 #include "libavutil/parseutils.h"
25 #include "avfilter.h"
26 #include "filters.h"
27 #include "formats.h"
28 #include "audio.h"
29 #include "video.h"
30 #include "internal.h"
31
32 typedef struct Audio3dScopeContext {
33 const AVClass *class;
34 int w, h;
35 int size;
36 float fov;
37 float roll;
38 float pitch;
39 float yaw;
40 float zoom[3];
41 float eye[3];
42
43 AVRational frame_rate;
44 int nb_samples;
45
46 float view_matrix[4][4];
47 float projection_matrix[4][4];
48
49 AVFrame *frames[60];
50 } Audio3dScopeContext;
51
52 #define OFFSET(x) offsetof(Audio3dScopeContext, x)
53 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
54 #define TFLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
55
56 static const AVOption a3dscope_options[] = {
57 { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
58 { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
59 { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, FLAGS },
60 { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, FLAGS },
61 { "fov", "set camera FoV", OFFSET(fov), AV_OPT_TYPE_FLOAT, {.dbl=90.f}, 40, 150, TFLAGS },
62 { "roll", "set camera roll",OFFSET(roll), AV_OPT_TYPE_FLOAT, {.dbl=0.f}, -180, 180, TFLAGS },
63 { "pitch","set camera pitch",OFFSET(pitch), AV_OPT_TYPE_FLOAT, {.dbl=0.f}, -180, 180, TFLAGS },
64 { "yaw", "set camera yaw", OFFSET(yaw), AV_OPT_TYPE_FLOAT, {.dbl=0.f}, -180, 180, TFLAGS },
65 { "xzoom","set camera zoom", OFFSET(zoom[0]),AV_OPT_TYPE_FLOAT, {.dbl=1.f}, 0.01, 10, TFLAGS },
66 { "yzoom","set camera zoom", OFFSET(zoom[1]),AV_OPT_TYPE_FLOAT, {.dbl=1.f}, 0.01, 10, TFLAGS },
67 { "zzoom","set camera zoom", OFFSET(zoom[2]),AV_OPT_TYPE_FLOAT, {.dbl=1.f}, 0.01, 10, TFLAGS },
68 { "xpos", "set camera position", OFFSET(eye[0]), AV_OPT_TYPE_FLOAT, {.dbl=0.f},-60.f, 60.f, TFLAGS },
69 { "ypos", "set camera position", OFFSET(eye[1]), AV_OPT_TYPE_FLOAT, {.dbl=0.f},-60.f, 60.f, TFLAGS },
70 { "zpos", "set camera position", OFFSET(eye[2]), AV_OPT_TYPE_FLOAT, {.dbl=0.f},-60.f, 60.f, TFLAGS },
71 { "length","set length", OFFSET(size), AV_OPT_TYPE_INT, {.i64=15}, 1, 60, FLAGS },
72 { NULL }
73 };
74
75 AVFILTER_DEFINE_CLASS(a3dscope);
76
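/* Advertise planar float audio (any channel count, any sample rate) on the
 * audio input and RGBA on the video output. */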
77 static int query_formats(AVFilterContext *ctx)
78 {
79 AVFilterFormats *formats = NULL;
80 AVFilterChannelLayouts *layouts = NULL;
81 AVFilterLink *inlink = ctx->inputs[0];
82 AVFilterLink *outlink = ctx->outputs[0];
83 static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
84 static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
85 int ret;
86
87 formats = ff_make_format_list(sample_fmts);
88 if ((ret = ff_formats_ref (formats, &inlink->outcfg.formats )) < 0)
89 return ret;
90
91 formats = ff_all_samplerates();
92 if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0)
93 return ret;
94
95 formats = ff_make_format_list(pix_fmts);
96 if ((ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
97 return ret;
98
99 layouts = ff_all_channel_counts();
100 if ((ret = ff_channel_layouts_ref(layouts, &inlink->outcfg.channel_layouts)) < 0)
101 return ret;
102
103 return 0;
104 }
105
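/* Derive the number of audio samples consumed per output video frame from the
 * input sample rate and the requested frame rate (at least one sample). */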
106 static int config_input(AVFilterLink *inlink)
107 {
108 AVFilterContext *ctx = inlink->dst;
109 Audio3dScopeContext *s = ctx->priv;
110
111 s->nb_samples = FFMAX(1, av_rescale(inlink->sample_rate, s->frame_rate.den, s->frame_rate.num));
112
113 return 0;
114 }
115
116 static int config_output(AVFilterLink *outlink)
117 {
118 Audio3dScopeContext *s = outlink->src->priv;
119
120 outlink->w = s->w;
121 outlink->h = s->h;
122 outlink->sample_aspect_ratio = (AVRational){1,1};
123 outlink->frame_rate = s->frame_rate;
124 outlink->time_base = av_inv_q(outlink->frame_rate);
125
126 return 0;
127 }
128
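/* Build a perspective projection matrix: f = 1/tan(fov/2), x and y are scaled
 * by f (corrected by the aspect factor a), z is mapped into the near/far range,
 * and w receives -z so that the later division by d[3] performs the perspective
 * divide. Only the non-zero entries are written after the matrix is cleared. */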
129 static void projection_matrix(float fov, float a, float near, float far,
130 float matrix[4][4])
131 {
132 float f;
133
134 memset(matrix, 0, 16 * sizeof(float));
135
136 f = 1.0f / tanf(fov * 0.5f * M_PI / 180.f);
137 matrix[0][0] = f / a;
138 matrix[1][1] = f;
139 matrix[2][2] = -(far + near) / (far - near);
140 matrix[2][3] = -1.f;
141 matrix[3][2] = -(near * far) / (far - near);
142 }
143
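/* Small 4x4 helpers: vmultiply() treats v as a row vector and computes v * m,
 * mmultiply() composes two matrices row by row, and vdot() is the 3-component
 * dot product used when folding the camera position into the view matrix. */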
144 static inline void vmultiply(const float v[4], const float m[4][4], float d[4])
145 {
146 d[0] = v[0] * m[0][0] + v[1] * m[1][0] + v[2] * m[2][0] + v[3] * m[3][0];
147 d[1] = v[0] * m[0][1] + v[1] * m[1][1] + v[2] * m[2][1] + v[3] * m[3][1];
148 d[2] = v[0] * m[0][2] + v[1] * m[1][2] + v[2] * m[2][2] + v[3] * m[3][2];
149 d[3] = v[0] * m[0][3] + v[1] * m[1][3] + v[2] * m[2][3] + v[3] * m[3][3];
150 }
151
152 static void mmultiply(const float m2[4][4], const float m1[4][4], float m[4][4])
153 {
154 vmultiply(m2[0], m1, m[0]);
155 vmultiply(m2[1], m1, m[1]);
156 vmultiply(m2[2], m1, m[2]);
157 vmultiply(m2[3], m1, m[3]);
158 }
159
160 static float vdot(const float x[3], const float y[3])
161 {
162 return x[0] * y[0] + x[1] * y[1] + x[2] * y[2];
163 }
164
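/* Build the camera (view) matrix: rx, ry and rz rotate around the X, Y and Z
 * axes using the yaw, pitch and roll angles, with the per-axis zoom factors
 * placed on the otherwise-unit diagonal entries; the rotations are composed
 * and the camera position is folded in as a translation by -eye. */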
165 static void view_matrix(const float eye[3],
166 const float z[3],
167 const float roll,
168 const float pitch,
169 const float yaw, float m[4][4])
170 {
171 float cr = cosf(roll * M_PI / 180.f);
172 float sr = sinf(roll * M_PI / 180.f);
173 float cp = cosf(pitch * M_PI / 180.f);
174 float sp = sinf(pitch * M_PI / 180.f);
175 float cy = cosf(yaw * M_PI / 180.f);
176 float sy = sinf(yaw * M_PI / 180.f);
177 float t[4][4];
178 float rx[4][4] = {
179 {z[0], 0.f, 0.f, 0.f },
180 { 0.f, cy, -sy, 0.f },
181 { 0.f, sy, cy, 0.f },
182 { 0.f, 0.f, 0.f, 1.f },
183 };
184
185 float ry[4][4] = {
186 { cp, 0.f, sp, 0.f },
187 { 0.f,z[1], 0.f, 0.f },
188 {-sp, 0.f, cp, 0.f },
189 { 0.f, 0.f, 0.f, 1.f },
190 };
191
192 float rz[4][4] = {
193 { cr, -sr, 0.f, 0.f },
194 { sr, cr, 0.f, 0.f },
195 { 0.f, 0.f,z[2], 0.f },
196 { 0.f, 0.f, 0.f, 1.f },
197 };
198
199 memset(m, 0, 16 * sizeof(float));
200
201 mmultiply(rx, ry, t);
202 mmultiply(rz, t, m);
203
204 m[3][0] = -vdot(m[0], eye);
205 m[3][1] = -vdot(m[1], eye);
206 m[3][2] = -vdot(m[2], eye);
207 }
208
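/* Write a single RGBA pixel; colour and alpha are attenuated by z, the clipped
 * inverse depth, so nearer samples appear brighter. */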
209 static void draw_dot(AVFrame *out, unsigned x, unsigned y, float z,
210 int r, int g, int b)
211 {
212 const ptrdiff_t linesize = out->linesize[0];
213 uint8_t *dst;
214
215 dst = out->data[0] + y * linesize + x * 4;
216 dst[0] = r * z;
217 dst[1] = g * z;
218 dst[2] = b * z;
219 dst[3] = 255 * z;
220 }
221
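/* Produce one video frame: clear the output, rebuild the combined
 * projection * view matrix from the current (possibly runtime-updated) options,
 * then project every sample of the buffered audio frames into screen space
 * and plot it. */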
222 static int filter_frame(AVFilterLink *inlink, AVFrame *in)
223 {
224 AVFilterContext *ctx = inlink->dst;
225 AVFilterLink *outlink = ctx->outputs[0];
226 Audio3dScopeContext *s = ctx->priv;
227 const float half_height = (s->h - 1) * 0.5f;
228 const float half_width = (s->w - 1) * 0.5f;
229 float matrix[4][4];
230 const int w = s->w;
231 const int h = s->h;
232 AVFrame *out;
233
234 out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
235 if (!out) {
236 av_frame_free(&in);
237 return AVERROR(ENOMEM);
238 }
239
240 s->frames[0] = in;
241
242 out->sample_aspect_ratio = (AVRational){1,1};
243 for (int y = 0; y < outlink->h; y++)
244 memset(out->data[0] + y * out->linesize[0], 0, outlink->w * 4);
245 out->pts = av_rescale_q(in->pts, inlink->time_base, outlink->time_base);
246 out->duration = 1;
247
248 projection_matrix(s->fov, half_width / half_height, 0.1f, 1000000.f, s->projection_matrix);
249 view_matrix(s->eye, s->zoom, s->roll, s->pitch, s->yaw, s->view_matrix);
250 mmultiply(s->projection_matrix, s->view_matrix, matrix);
251
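/* Walk the frame history from oldest (highest index) to newest so that newer
 * samples are drawn on top; nn pushes each older frame further along the
 * depth axis. */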
252 for (int nb_frame = s->size - 1; nb_frame >= 0; nb_frame--) {
253 const float scale = 1.f / s->nb_samples;
254 AVFrame *frame = s->frames[nb_frame];
255 float channels;
256
257 if (!frame)
258 continue;
259
260 channels = frame->ch_layout.nb_channels;
261 for (int ch = 0; ch < channels; ch++) {
262 const float *src = (float *)frame->extended_data[ch];
263 const int r = 128.f + 127.f * sinf(ch / FFMAX(channels - 1.f, 1.f) * M_PI);
264 const int g = 128.f + 127.f * ch / FFMAX(channels - 1.f, 1.f);
265 const int b = 128.f + 127.f * cosf(ch / FFMAX(channels - 1.f, 1.f) * M_PI);
266
267 for (int n = frame->nb_samples - 1, nn = s->nb_samples * nb_frame; n >= 0; n--, nn++) {
268 float v[4] = { src[n], ch - (channels - 1) * 0.5f, -0.1f + -nn * scale, 1.f };
269 float d[4];
270 int x, y;
271
272 vmultiply(v, matrix, d);
273
274 d[0] /= d[3];
275 d[1] /= d[3];
276
277 x = d[0] * half_width + half_width;
278 y = d[1] * half_height + half_height;
279
280 if (x >= w || y >= h || x < 0 || y < 0)
281 continue;
282
283 draw_dot(out, x, y, av_clipf(1.f / d[3], 0.f, 1.f),
284 r, g, b);
285 }
286 }
287 }
288
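/* Age the history: free the oldest of the 60 slots, shift every pointer one
 * step back and clear slot 0 for the next incoming audio frame. */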
289 av_frame_free(&s->frames[59]);
290 memmove(&s->frames[1], &s->frames[0], 59 * sizeof(AVFrame *));
291 s->frames[0] = NULL;
292
293 return ff_filter_frame(outlink, out);
294 }
295
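/* Consume exactly nb_samples audio samples per invocation and render a frame;
 * if enough queued samples remain, mark the filter ready again, otherwise
 * forward status and frame requests between the audio input and video output. */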
296 static int activate(AVFilterContext *ctx)
297 {
298 AVFilterLink *inlink = ctx->inputs[0];
299 AVFilterLink *outlink = ctx->outputs[0];
300 Audio3dScopeContext *s = ctx->priv;
301 AVFrame *in;
302 int ret;
303
304 FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
305
306 ret = ff_inlink_consume_samples(inlink, s->nb_samples, s->nb_samples, &in);
307 if (ret < 0)
308 return ret;
309 if (ret > 0)
310 return filter_frame(inlink, in);
311
312 if (ff_inlink_queued_samples(inlink) >= s->nb_samples) {
313 ff_filter_set_ready(ctx, 10);
314 return 0;
315 }
316
317 FF_FILTER_FORWARD_STATUS(inlink, outlink);
318 FF_FILTER_FORWARD_WANTED(outlink, inlink);
319
320 return FFERROR_NOT_READY;
321 }
322
323 static av_cold void uninit(AVFilterContext *ctx)
324 {
325 Audio3dScopeContext *s = ctx->priv;
326
327 for (int n = 0; n < 60; n++)
328 av_frame_free(&s->frames[n]);
329 }
330
331 static const AVFilterPad audio3dscope_inputs[] = {
332 {
333 .name = "default",
334 .type = AVMEDIA_TYPE_AUDIO,
335 .config_props = config_input,
336 },
337 };
338
339 static const AVFilterPad audio3dscope_outputs[] = {
340 {
341 .name = "default",
342 .type = AVMEDIA_TYPE_VIDEO,
343 .config_props = config_output,
344 },
345 };
346
347 const AVFilter ff_avf_a3dscope = {
348 .name = "a3dscope",
349 .description = NULL_IF_CONFIG_SMALL("Convert input audio to 3d scope video output."),
350 .uninit = uninit,
351 .priv_size = sizeof(Audio3dScopeContext),
352 .activate = activate,
353 FILTER_INPUTS(audio3dscope_inputs),
354 FILTER_OUTPUTS(audio3dscope_outputs),
355 FILTER_QUERY_FUNC(query_formats),
356 .priv_class = &a3dscope_class,
357 .process_command = ff_filter_process_command,
358 };
359