| Line | Branch | Exec | Source |
|---|---|---|---|
| 1 | /* | ||
| 2 | * Copyright (c) 2016 Paul B Mahol | ||
| 3 | * | ||
| 4 | * This file is part of FFmpeg. | ||
| 5 | * | ||
| 6 | * FFmpeg is free software; you can redistribute it and/or | ||
| 7 | * modify it under the terms of the GNU Lesser General Public | ||
| 8 | * License as published by the Free Software Foundation; either | ||
| 9 | * version 2.1 of the License, or (at your option) any later version. | ||
| 10 | * | ||
| 11 | * FFmpeg is distributed in the hope that it will be useful, | ||
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 14 | * Lesser General Public License for more details. | ||
| 15 | * | ||
| 16 | * You should have received a copy of the GNU Lesser General Public | ||
| 17 | * License along with FFmpeg; if not, write to the Free Software | ||
| 18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include "libavutil/avstring.h" | ||
| 22 | #include "libavutil/intreadwrite.h" | ||
| 23 | #include "libavutil/mem.h" | ||
| 24 | #include "libavutil/opt.h" | ||
| 25 | #include "libavutil/parseutils.h" | ||
| 26 | #include "avfilter.h" | ||
| 27 | #include "filters.h" | ||
| 28 | #include "formats.h" | ||
| 29 | #include "audio.h" | ||
| 30 | #include "video.h" | ||
| 31 | |||
/**
 * Per-instance state for the abitscope filter, which visualizes how often
 * each bit of the input audio samples is set.
 */
typedef struct AudioBitScopeContext {
    const AVClass *class;
    int w, h;                  ///< output video dimensions ("size" option)
    AVRational frame_rate;     ///< output frame rate ("rate" option)
    char *colors;              ///< '|'/space-separated per-channel color names ("colors" option)
    int mode;                  ///< 0: bars, 1: trace ("mode" option)

    int nb_channels;           ///< number of input audio channels
    int nb_samples;            ///< audio samples consumed per output video frame
    int depth;                 ///< bit depth assumed for the input sample format
    int current_vpos;          ///< current scanline in trace mode; wraps at frame height
    uint8_t *fg;               ///< per-channel RGBA foreground color, 4 bytes each

    uint64_t counter[64];      ///< scratch: set-bit counts for one channel's samples

    AVFrame *outpicref;        ///< persistent canvas reused across frames in trace mode
} AudioBitScopeContext;
| 49 | |||
#define OFFSET(x) offsetof(AudioBitScopeContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* User-visible options; "r"/"s"/"m" are short aliases of "rate"/"size"/"mode". */
static const AVOption abitscope_options[] = {
    { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
    { "r",    "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="25"}, 0, INT_MAX, FLAGS },
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="1024x256"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="1024x256"}, 0, 0, FLAGS },
    { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
    { "mode", "set output mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, FLAGS, .unit = "mode" },
    { "m",    "set output mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, FLAGS, .unit = "mode" },
    { "bars",  NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, .unit = "mode" },
    { "trace", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, .unit = "mode" },
    { NULL }
};
| 65 | |||
| 66 | AVFILTER_DEFINE_CLASS(abitscope); | ||
| 67 | |||
| 68 | ✗ | static int query_formats(const AVFilterContext *ctx, | |
| 69 | AVFilterFormatsConfig **cfg_in, | ||
| 70 | AVFilterFormatsConfig **cfg_out) | ||
| 71 | { | ||
| 72 | ✗ | AVFilterFormats *formats = NULL; | |
| 73 | static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_S32P, | ||
| 74 | AV_SAMPLE_FMT_U8P, AV_SAMPLE_FMT_S64P, | ||
| 75 | AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP, | ||
| 76 | AV_SAMPLE_FMT_NONE }; | ||
| 77 | static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE }; | ||
| 78 | int ret; | ||
| 79 | |||
| 80 | ✗ | formats = ff_make_sample_format_list(sample_fmts); | |
| 81 | ✗ | if ((ret = ff_formats_ref(formats, &cfg_in[0]->formats)) < 0) | |
| 82 | ✗ | return ret; | |
| 83 | |||
| 84 | ✗ | formats = ff_make_pixel_format_list(pix_fmts); | |
| 85 | ✗ | if ((ret = ff_formats_ref(formats, &cfg_out[0]->formats)) < 0) | |
| 86 | ✗ | return ret; | |
| 87 | |||
| 88 | ✗ | return 0; | |
| 89 | } | ||
| 90 | |||
| 91 | ✗ | static int config_input(AVFilterLink *inlink) | |
| 92 | { | ||
| 93 | ✗ | AVFilterContext *ctx = inlink->dst; | |
| 94 | ✗ | AudioBitScopeContext *s = ctx->priv; | |
| 95 | int ch; | ||
| 96 | ✗ | char *colors, *saveptr = NULL; | |
| 97 | |||
| 98 | ✗ | s->nb_samples = FFMAX(1, av_rescale(inlink->sample_rate, s->frame_rate.den, s->frame_rate.num)); | |
| 99 | ✗ | s->nb_channels = inlink->ch_layout.nb_channels; | |
| 100 | ✗ | s->depth = inlink->format == AV_SAMPLE_FMT_S16P ? 16 : 32; | |
| 101 | |||
| 102 | ✗ | s->fg = av_malloc_array(s->nb_channels, 4 * sizeof(*s->fg)); | |
| 103 | ✗ | if (!s->fg) | |
| 104 | ✗ | return AVERROR(ENOMEM); | |
| 105 | |||
| 106 | ✗ | colors = av_strdup(s->colors); | |
| 107 | ✗ | if (!colors) | |
| 108 | ✗ | return AVERROR(ENOMEM); | |
| 109 | |||
| 110 | ✗ | for (ch = 0; ch < s->nb_channels; ch++) { | |
| 111 | ✗ | uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff }; | |
| 112 | char *color; | ||
| 113 | |||
| 114 | ✗ | color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr); | |
| 115 | ✗ | if (color) | |
| 116 | ✗ | av_parse_color(fg, color, -1, ctx); | |
| 117 | ✗ | s->fg[4 * ch + 0] = fg[0]; | |
| 118 | ✗ | s->fg[4 * ch + 1] = fg[1]; | |
| 119 | ✗ | s->fg[4 * ch + 2] = fg[2]; | |
| 120 | ✗ | s->fg[4 * ch + 3] = fg[3]; | |
| 121 | } | ||
| 122 | ✗ | av_free(colors); | |
| 123 | |||
| 124 | ✗ | return 0; | |
| 125 | } | ||
| 126 | |||
| 127 | ✗ | static int config_output(AVFilterLink *outlink) | |
| 128 | { | ||
| 129 | ✗ | AudioBitScopeContext *s = outlink->src->priv; | |
| 130 | ✗ | FilterLink *l = ff_filter_link(outlink); | |
| 131 | |||
| 132 | ✗ | outlink->w = s->w; | |
| 133 | ✗ | outlink->h = s->h; | |
| 134 | ✗ | outlink->sample_aspect_ratio = (AVRational){1,1}; | |
| 135 | ✗ | l->frame_rate = s->frame_rate; | |
| 136 | ✗ | outlink->time_base = av_inv_q(l->frame_rate); | |
| 137 | |||
| 138 | ✗ | return 0; | |
| 139 | } | ||
| 140 | |||
/* Fill counter[0..depth-1] with the number of samples in in[0..nb_samples-1]
 * that have the corresponding bit set.  The inner loop exits early once the
 * remaining sample value is zero.  Expects `in`, `nb_samples`, `counter` and
 * `s` in the expansion scope. */
#define BITCOUNTER(type, depth, one) \
        memset(counter, 0, sizeof(s->counter)); \
        for (int i = 0; i < nb_samples; i++) { \
            const type x = in[i]; \
            for (int j = 0; j < depth && x; j++) \
                counter[j] += !!(x & (one << j)); \
        }
| 148 | |||
/* Bars mode: for each channel, draw one horizontal bar per bit (MSB at the
 * top), whose width is proportional to how often that bit was set in the
 * current frame's samples.  Channels are laid out side by side; rows 0 and
 * h-1 of each bar are left blank as separators.  Expects `inlink`,
 * `insamples`, `outpicref` and `s` in the expansion scope. */
#define BARS(type, depth, one) \
    for (int ch = 0; ch < inlink->ch_layout.nb_channels; ch++) { \
        const int nb_samples = insamples->nb_samples; \
        const type *in = (const type *)insamples->extended_data[ch]; \
        const int w = outpicref->width / inlink->ch_layout.nb_channels; \
        const int h = outpicref->height / depth; \
        const uint32_t color = AV_RN32(&s->fg[4 * ch]); \
        uint64_t *counter = s->counter; \
 \
        BITCOUNTER(type, depth, one) \
 \
        for (int b = 0; b < depth; b++) { \
            for (int j = 1; j < h - 1; j++) { \
                uint8_t *dst = outpicref->data[0] + (b * h + j) * outpicref->linesize[0] + w * ch * 4; \
                const int ww = (counter[depth - b - 1] / (float)nb_samples) * (w - 1); \
 \
                for (int i = 0; i < ww; i++) { \
                    AV_WN32(&dst[i * 4], color); \
                } \
            } \
        } \
    }
| 171 | |||
/* Trace mode: draw one scanline (at s->current_vpos) per frame.  Each channel
 * occupies a horizontal band split into `depth` cells (MSB leftmost); a cell's
 * brightness scales the channel color by how often that bit was set in the
 * frame's samples.  Expects `inlink`, `insamples`, `outpicref` and `s` in the
 * expansion scope. */
#define DO_TRACE(type, depth, one) \
    for (int ch = 0; ch < inlink->ch_layout.nb_channels; ch++) { \
        const int nb_samples = insamples->nb_samples; \
        const int w = outpicref->width / inlink->ch_layout.nb_channels; \
        const type *in = (const type *)insamples->extended_data[ch]; \
        uint64_t *counter = s->counter; \
        const int wb = w / depth; \
        int wv; \
 \
        BITCOUNTER(type, depth, one) \
 \
        for (int b = 0; b < depth; b++) { \
            uint8_t colors[4]; \
            uint32_t color; \
            uint8_t *dst = outpicref->data[0] + w * ch * 4 + wb * b * 4 + \
                           s->current_vpos * outpicref->linesize[0]; \
            wv = (counter[depth - b - 1] * 255) / nb_samples; \
            colors[0] = (wv * s->fg[ch * 4 + 0] + 127) / 255; \
            colors[1] = (wv * s->fg[ch * 4 + 1] + 127) / 255; \
            colors[2] = (wv * s->fg[ch * 4 + 2] + 127) / 255; \
            colors[3] = (wv * s->fg[ch * 4 + 3] + 127) / 255; \
            color = AV_RN32(colors); \
 \
            for (int x = 0; x < wb; x++) \
                AV_WN32(&dst[x * 4], color); \
        } \
    }
| 199 | |||
/**
 * Render one video frame from one block of audio samples.
 *
 * Frame ownership: in bars mode (mode 0) a fresh, cleared frame is allocated
 * and sent each call.  In trace mode (mode 1) a persistent canvas
 * (s->outpicref) is kept across calls so scanlines accumulate; each call
 * draws onto it and sends a clone downstream.  Takes ownership of
 * `insamples` and always frees it.
 *
 * @return result of ff_filter_frame(), or a negative AVERROR on failure
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AudioBitScopeContext *s = ctx->priv;
    AVFrame *outpicref;
    int ret;

    /* Bars mode always needs a new frame; trace mode only on first call. */
    if (s->mode == 0 || !s->outpicref) {
        outpicref = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!outpicref) {
            av_frame_free(&insamples);
            return AVERROR(ENOMEM);
        }

        /* Clear to fully transparent black (RGBA, 4 bytes/pixel). */
        for (int i = 0; i < outlink->h; i++)
            memset(outpicref->data[0] + i * outpicref->linesize[0], 0, outlink->w * 4);
        if (!s->outpicref && s->mode == 1)
            s->outpicref = outpicref;    /* canvas now owned by the context */
    }

    if (s->mode == 1) {
        /* The canvas may still be referenced downstream from the previous
         * clone; make it writable before drawing on it again. */
        ret = ff_inlink_make_frame_writable(outlink, &s->outpicref);
        if (ret < 0) {
            av_frame_free(&insamples);
            return ret;
        }
        /* Send a clone so the context keeps the accumulating canvas. */
        outpicref = av_frame_clone(s->outpicref);
        if (!outpicref) {
            av_frame_free(&insamples);
            return AVERROR(ENOMEM);
        }
    }

    outpicref->pts = av_rescale_q(insamples->pts, inlink->time_base, outlink->time_base);
    outpicref->duration = 1;
    outpicref->sample_aspect_ratio = (AVRational){1,1};

    /* Dispatch on sample format; floats/doubles are scanned as raw bit
     * patterns of the same width (FLTP as 32-bit, DBLP as 64-bit). */
    switch (insamples->format) {
    case AV_SAMPLE_FMT_U8P:
        if (s->mode == 0) { BARS(uint8_t, 8, 1) } else { DO_TRACE(uint8_t, 8, 1) }
        break;
    case AV_SAMPLE_FMT_S16P:
        if (s->mode == 0) { BARS(uint16_t, 16, 1) } else { DO_TRACE(uint16_t, 16, 1) }
        break;
    case AV_SAMPLE_FMT_FLTP:
    case AV_SAMPLE_FMT_S32P:
        if (s->mode == 0) { BARS(uint32_t, 32, 1U) } else { DO_TRACE(uint32_t, 32, 1U) }
        break;
    case AV_SAMPLE_FMT_DBLP:
    case AV_SAMPLE_FMT_S64P:
        if (s->mode == 0) { BARS(uint64_t, 64, 1ULL) } else { DO_TRACE(uint64_t, 64, 1ULL) }
        break;
    }

    /* Advance the trace scanline, wrapping back to the top. */
    s->current_vpos++;
    if (s->current_vpos >= outlink->h)
        s->current_vpos = 0;
    av_frame_free(&insamples);

    return ff_filter_frame(outlink, outpicref);
}
| 262 | |||
/**
 * Activation callback: pull exactly s->nb_samples samples per invocation and
 * turn each full block into one video frame; otherwise forward EOF/status and
 * frame-wanted signals between the links.  The FF_FILTER_FORWARD_* macros may
 * return from this function directly.
 */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AudioBitScopeContext *s = ctx->priv;
    AVFrame *in;
    int ret;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* min == max == nb_samples: only complete blocks are consumed. */
    ret = ff_inlink_consume_samples(inlink, s->nb_samples, s->nb_samples, &in);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return filter_frame(inlink, in);

    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}
| 284 | |||
| 285 | ✗ | static av_cold void uninit(AVFilterContext *ctx) | |
| 286 | { | ||
| 287 | ✗ | AudioBitScopeContext *s = ctx->priv; | |
| 288 | |||
| 289 | ✗ | av_frame_free(&s->outpicref); | |
| 290 | ✗ | } | |
| 291 | |||
/* Single audio input; config_input runs once the format is negotiated. */
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
};
| 299 | |||
/* Single video output; config_output applies the user size/rate options. */
static const AVFilterPad outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
};
| 307 | |||
/* Filter registration: audio in, video out, activate-based scheduling. */
const FFFilter ff_avf_abitscope = {
    .p.name        = "abitscope",
    .p.description = NULL_IF_CONFIG_SMALL("Convert input audio to audio bit scope video output."),
    .p.priv_class  = &abitscope_class,
    .priv_size     = sizeof(AudioBitScopeContext),
    FILTER_INPUTS(inputs),
    FILTER_OUTPUTS(outputs),
    FILTER_QUERY_FUNC2(query_formats),
    .uninit        = uninit,
    .activate      = activate,
};
| 319 |