/*
 * Copyright (c) 2017 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "framesync.h"
#include "video.h"

typedef struct MixContext {
    const AVClass *class;
    const AVPixFmtDescriptor *desc;
    char *weights_str;  // user-supplied list of per-input weights
    int nb_inputs;      // number of inputs (mix) or of buffered frames (tmix)
    int duration;       // EOF handling: 0 longest, 1 shortest, 2 first input
    float *weights;     // parsed per-input weights
    float scale;        // user scale; 0 means normalize by the sum of weights
    float wfactor;      // final multiplier applied to the weighted sum

    int tmix;           // 1 when operating as the tmix filter
    int nb_frames;      // frames buffered so far (tmix only)

    int depth;          // component bit depth
    int max;            // maximum component value, (1 << depth) - 1
    int nb_planes;
    int linesize[4];    // bytes per line of each plane
    int height[4];      // height of each plane

    AVFrame **frames;   // current set of input frames
    FFFrameSync fs;     // frame synchronizer (mix only)
} MixContext;

/* Accept every pixel format that is not paletted, hardware-backed or bitstream-packed. */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *pix_fmts = NULL;
    int fmt, ret;

    for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
        if (!(desc->flags & AV_PIX_FMT_FLAG_PAL ||
              desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
              desc->flags & AV_PIX_FMT_FLAG_BITSTREAM) &&
            (ret = ff_add_format(&pix_fmts, fmt)) < 0)
            return ret;
    }

    return ff_set_common_formats(ctx, pix_fmts);
}

static av_cold int init(AVFilterContext *ctx)
{
    MixContext *s = ctx->priv;
    char *p, *arg, *saveptr = NULL;
    int i, ret, last = 0;

    s->tmix = !strcmp(ctx->filter->name, "tmix");

    s->frames = av_calloc(s->nb_inputs, sizeof(*s->frames));
    if (!s->frames)
        return AVERROR(ENOMEM);

    s->weights = av_calloc(s->nb_inputs, sizeof(*s->weights));
    if (!s->weights)
        return AVERROR(ENOMEM);

    /* The mix filter creates its input pads dynamically; tmix has a single static input. */
    if (!s->tmix) {
        for (i = 0; i < s->nb_inputs; i++) {
            AVFilterPad pad = { 0 };

            pad.type = AVMEDIA_TYPE_VIDEO;
            pad.name = av_asprintf("input%d", i);
            if (!pad.name)
                return AVERROR(ENOMEM);

            if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
                av_freep(&pad.name);
                return ret;
            }
        }
    }

    /* Parse the weights string; missing entries repeat the last given weight. */
    p = s->weights_str;
    for (i = 0; i < s->nb_inputs; i++) {
        if (!(arg = av_strtok(p, " ", &saveptr)))
            break;

        p = NULL;
        sscanf(arg, "%f", &s->weights[i]);
        s->wfactor += s->weights[i];
        last = i;
    }
    for (; i < s->nb_inputs; i++) {
        s->weights[i] = s->weights[last];
        s->wfactor += s->weights[i];
    }
    if (s->scale == 0) {
        s->wfactor = 1 / s->wfactor;
    } else {
        s->wfactor = s->scale;
    }

    return 0;
}
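
/*
 * Illustrative example of the weight handling above (not part of the original
 * source): with three inputs, weights "1 2" and scale left at 0, the parsed
 * weights become {1, 2, 2} (the last value is repeated), their sum is 5, and
 * wfactor = 1/5, so each output sample is (1*a + 2*b + 2*c) / 5. With a
 * non-zero scale, the weighted sum is multiplied by that scale instead of
 * being normalized.
 */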

typedef struct ThreadData {
    AVFrame **in, *out;
} ThreadData;

/* Compute the weighted sum of all input frames for one horizontal slice of the output. */
static int mix_frames(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    MixContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame **in = td->in;
    AVFrame *out = td->out;
    int i, p, x, y;

    if (s->depth <= 8) {
        for (p = 0; p < s->nb_planes; p++) {
            const int slice_start = (s->height[p] * jobnr) / nb_jobs;
            const int slice_end = (s->height[p] * (jobnr+1)) / nb_jobs;
            uint8_t *dst = out->data[p] + slice_start * out->linesize[p];

            for (y = slice_start; y < slice_end; y++) {
                for (x = 0; x < s->linesize[p]; x++) {
                    int val = 0;

                    for (i = 0; i < s->nb_inputs; i++) {
                        uint8_t src = in[i]->data[p][y * in[i]->linesize[p] + x];

                        val += src * s->weights[i];
                    }

                    dst[x] = av_clip_uint8(val * s->wfactor);
                }

                dst += out->linesize[p];
            }
        }
    } else {
        for (p = 0; p < s->nb_planes; p++) {
            const int slice_start = (s->height[p] * jobnr) / nb_jobs;
            const int slice_end = (s->height[p] * (jobnr+1)) / nb_jobs;
            uint16_t *dst = (uint16_t *)(out->data[p] + slice_start * out->linesize[p]);

            for (y = slice_start; y < slice_end; y++) {
                for (x = 0; x < s->linesize[p] / 2; x++) {
                    int val = 0;

                    for (i = 0; i < s->nb_inputs; i++) {
                        uint16_t src = AV_RN16(in[i]->data[p] + y * in[i]->linesize[p] + x * 2);

                        val += src * s->weights[i];
                    }

                    dst[x] = av_clip(val * s->wfactor, 0, s->max);
                }

                dst += out->linesize[p] / 2;
            }
        }
    }

    return 0;
}

static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFilterLink *outlink = ctx->outputs[0];
    MixContext *s = fs->opaque;
    AVFrame **in = s->frames;
    AVFrame *out;
    ThreadData td;
    int i, ret;

    for (i = 0; i < s->nb_inputs; i++) {
        if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0)
            return ret;
    }

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    td.in = in;
    td.out = out;
    ctx->internal->execute(ctx, mix_frames, &td, NULL, FFMIN(s->height[0], ff_filter_get_nb_threads(ctx)));

    return ff_filter_frame(outlink, out);
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s = ctx->priv;
    AVRational time_base = ctx->inputs[0]->time_base;
    AVRational frame_rate = ctx->inputs[0]->frame_rate;
    AVFilterLink *inlink = ctx->inputs[0];
    int height = ctx->inputs[0]->h;
    int width = ctx->inputs[0]->w;
    FFFrameSyncIn *in;
    int i, ret;

    if (!s->tmix) {
        for (i = 1; i < s->nb_inputs; i++) {
            if (ctx->inputs[i]->h != height || ctx->inputs[i]->w != width) {
                av_log(ctx, AV_LOG_ERROR, "Input %d size (%dx%d) does not match input %d size (%dx%d).\n", i, ctx->inputs[i]->w, ctx->inputs[i]->h, 0, width, height);
                return AVERROR(EINVAL);
            }
        }
    }

    s->desc = av_pix_fmt_desc_get(outlink->format);
    if (!s->desc)
        return AVERROR_BUG;
    s->nb_planes = av_pix_fmt_count_planes(outlink->format);
    s->depth = s->desc->comp[0].depth;
    s->max = (1 << s->depth) - 1;

    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, s->desc->log2_chroma_h);
    s->height[0] = s->height[3] = inlink->h;

    /* tmix does not use the frame synchronizer; it buffers frames in its filter_frame(). */
    if (s->tmix)
        return 0;

    outlink->w          = width;
    outlink->h          = height;
    outlink->time_base  = time_base;
    outlink->frame_rate = frame_rate;

    if ((ret = ff_framesync_init(&s->fs, ctx, s->nb_inputs)) < 0)
        return ret;

    in = s->fs.in;
    s->fs.opaque = s;
    s->fs.on_event = process_frame;

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterLink *inlink = ctx->inputs[i];

        in[i].time_base = inlink->time_base;
        in[i].sync   = 1;
        in[i].before = EXT_STOP;
        in[i].after  = (s->duration == 1 || (s->duration == 2 && i == 0)) ? EXT_STOP : EXT_INFINITY;
    }

    return ff_framesync_configure(&s->fs);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    MixContext *s = ctx->priv;
    int i;

    ff_framesync_uninit(&s->fs);
    av_freep(&s->weights);

    if (!s->tmix) {
        for (i = 0; i < ctx->nb_inputs; i++)
            av_freep(&ctx->input_pads[i].name);
    } else {
        for (i = 0; i < s->nb_frames; i++)
            av_frame_free(&s->frames[i]);
    }
    av_freep(&s->frames);
}

static int activate(AVFilterContext *ctx)
{
    MixContext *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}

#define OFFSET(x) offsetof(MixContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM

static const AVOption mix_options[] = {
    { "inputs", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64=2}, 2, INT_MAX, .flags = FLAGS },
    { "weights", "set weight for each input", OFFSET(weights_str), AV_OPT_TYPE_STRING, {.str="1 1"}, 0, 0, .flags = FLAGS },
    { "scale", "set scale", OFFSET(scale), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, INT16_MAX, .flags = FLAGS },
    { "duration", "how to determine end of stream", OFFSET(duration), AV_OPT_TYPE_INT, {.i64=0}, 0, 2, .flags = FLAGS, "duration" },
        { "longest",  "Duration of longest input",  0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "duration" },
        { "shortest", "Duration of shortest input", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "duration" },
        { "first",    "Duration of first input",    0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "duration" },
    { NULL },
};
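
/*
 * Example usage of the options above (illustrative, not part of the original
 * source): mix two inputs, weighting the second twice as much as the first,
 * and end the output with the shortest input:
 *
 *   ffmpeg -i a.mp4 -i b.mp4 -filter_complex mix=inputs=2:weights="1 2":duration=shortest out.mp4
 *
 * With scale left at 0, the weighted sum is divided by the sum of the weights (3 here).
 */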

static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
    { NULL }
};

#if CONFIG_MIX_FILTER
AVFILTER_DEFINE_CLASS(mix);

AVFilter ff_vf_mix = {
    .name          = "mix",
    .description   = NULL_IF_CONFIG_SMALL("Mix video inputs."),
    .priv_size     = sizeof(MixContext),
    .priv_class    = &mix_class,
    .query_formats = query_formats,
    .outputs       = outputs,
    .init          = init,
    .uninit        = uninit,
    .activate      = activate,
    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS | AVFILTER_FLAG_SLICE_THREADS,
};

#endif /* CONFIG_MIX_FILTER */

#if CONFIG_TMIX_FILTER
static int tmix_filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    MixContext *s = ctx->priv;
    ThreadData td;
    AVFrame *out;

    /* Buffer the first nb_inputs frames without producing output; afterwards
     * keep a sliding window of the most recent nb_inputs frames. */
    if (s->nb_frames < s->nb_inputs) {
        s->frames[s->nb_frames] = in;
        s->nb_frames++;
        return 0;
    } else {
        av_frame_free(&s->frames[0]);
        memmove(&s->frames[0], &s->frames[1], sizeof(*s->frames) * (s->nb_inputs - 1));
        s->frames[s->nb_inputs - 1] = in;
    }

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    out->pts = s->frames[0]->pts;

    td.out = out;
    td.in = s->frames;
    ctx->internal->execute(ctx, mix_frames, &td, NULL, FFMIN(s->height[0], ff_filter_get_nb_threads(ctx)));

    return ff_filter_frame(outlink, out);
}

static const AVOption tmix_options[] = {
    { "frames", "set number of successive frames to mix", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64=3}, 2, 128, .flags = FLAGS },
    { "weights", "set weight for each frame", OFFSET(weights_str), AV_OPT_TYPE_STRING, {.str="1 1 1"}, 0, 0, .flags = FLAGS },
    { "scale", "set scale", OFFSET(scale), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, INT16_MAX, .flags = FLAGS },
    { NULL },
};
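
/*
 * Example usage of the options above (illustrative, not part of the original
 * source): blend each frame with its two predecessors, weighting the middle
 * frame more heavily:
 *
 *   ffmpeg -i input.mp4 -vf tmix=frames=3:weights="1 2 1" output.mp4
 */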

static const AVFilterPad inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = tmix_filter_frame,
    },
    { NULL }
};

AVFILTER_DEFINE_CLASS(tmix);

AVFilter ff_vf_tmix = {
    .name          = "tmix",
    .description   = NULL_IF_CONFIG_SMALL("Mix successive video frames."),
    .priv_size     = sizeof(MixContext),
    .priv_class    = &tmix_class,
    .query_formats = query_formats,
    .outputs       = outputs,
    .inputs        = inputs,
    .init          = init,
    .uninit        = uninit,
    .flags         = AVFILTER_FLAG_SLICE_THREADS,
};

#endif /* CONFIG_TMIX_FILTER */
