LCOV - code coverage report
Current view: top level - libavfilter - vf_maskedclamp.c (source / functions)
Test:         coverage.info
Date:         2018-05-20 11:54:08
              Hit   Total   Coverage
Lines:          0     156      0.0 %
Functions:      0       8      0.0 %

          Line data    Source code
       1             : /*
       2             :  * Copyright (c) 2016 Paul B Mahol
       3             :  *
       4             :  * This file is part of FFmpeg.
       5             :  *
       6             :  * FFmpeg is free software; you can redistribute it and/or
       7             :  * modify it under the terms of the GNU Lesser General Public
       8             :  * License as published by the Free Software Foundation; either
       9             :  * version 2.1 of the License, or (at your option) any later version.
      10             :  *
      11             :  * FFmpeg is distributed in the hope that it will be useful,
      12             :  * but WITHOUT ANY WARRANTY; without even the implied warranty of
      13             :  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
      14             :  * Lesser General Public License for more details.
      15             :  *
      16             :  * You should have received a copy of the GNU Lesser General Public
      17             :  * License along with FFmpeg; if not, write to the Free Software
      18             :  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
      19             :  */
      20             : 
      21             : #include "libavutil/imgutils.h"
      22             : #include "libavutil/pixdesc.h"
      23             : #include "libavutil/opt.h"
      24             : #include "avfilter.h"
      25             : #include "formats.h"
      26             : #include "internal.h"
      27             : #include "video.h"
      28             : #include "framesync.h"
      29             : 
      30             : #define OFFSET(x) offsetof(MaskedClampContext, x)
      31             : #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
      32             : 
      33             : typedef struct ThreadData {
       34             :     AVFrame *b, *o, *m, *d;   /* base, dark, bright and destination frames (see process_frame) */
      35             : } ThreadData;
      36             : 
      37             : typedef struct MaskedClampContext {
      38             :     const AVClass *class;
      39             : 
      40             :     int planes;
      41             :     int undershoot;
      42             :     int overshoot;
      43             : 
      44             :     int linesize[4];
      45             :     int width[4], height[4];
      46             :     int nb_planes;
      47             :     int depth;
      48             :     FFFrameSync fs;
      49             : 
      50             :     int (*maskedclamp)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
      51             : } MaskedClampContext;
      52             : 
      53             : static const AVOption maskedclamp_options[] = {
      54             :     { "undershoot", "set undershoot", OFFSET(undershoot), AV_OPT_TYPE_INT, {.i64=0},   0, UINT16_MAX, FLAGS },
      55             :     { "overshoot",  "set overshoot",  OFFSET(overshoot),  AV_OPT_TYPE_INT, {.i64=0},   0, UINT16_MAX, FLAGS },
      56             :     { "planes",     "set planes",     OFFSET(planes),     AV_OPT_TYPE_INT, {.i64=0xF}, 0, 0xF,        FLAGS },
      57             :     { NULL }
      58             : };
      59             : 
      60             : AVFILTER_DEFINE_CLASS(maskedclamp);
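/* The three options above control a per-pixel clamp of the "base" input
 * against the "dark" and "bright" inputs.  A minimal scalar sketch of that
 * operation follows; clamp_pixel() is an illustrative helper and is not part
 * of this file. */
static int clamp_pixel(int base, int dark, int bright,
                       int undershoot, int overshoot)
{
    if (base < dark - undershoot)
        return dark - undershoot;    /* lift base up to the lower bound   */
    if (base > bright + overshoot)
        return bright + overshoot;   /* pull base down to the upper bound */
    return base;                     /* already within the allowed range  */
}
/* With undershoot = overshoot = 0 this reduces to clamping base into the
 * closed range [dark, bright]. */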
      61             : 
      62           0 : static int query_formats(AVFilterContext *ctx)
      63             : {
      64             :     static const enum AVPixelFormat pix_fmts[] = {
      65             :         AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
      66             :         AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
      67             :         AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
      68             :         AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
      69             :         AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
      70             :         AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
      71             :         AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
      72             :         AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
      73             :         AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
      74             :         AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
      75             :         AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
      76             :         AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
      77             :         AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
      78             :         AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
      79             :         AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
      80             :         AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
      81             :         AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY16,
      82             :         AV_PIX_FMT_NONE
      83             :     };
      84             : 
      85           0 :     return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
      86             : }
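/* ff_set_common_formats() applies this one list to every input and output
 * link, so all three inputs are negotiated to the same planar 8-16 bit
 * YUV/GBR/GRAY pixel format; config_output() below still verifies that the
 * chosen formats and frame sizes actually match. */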
      87             : 
      88           0 : static int process_frame(FFFrameSync *fs)
      89             : {
      90           0 :     AVFilterContext *ctx = fs->parent;
      91           0 :     MaskedClampContext *s = fs->opaque;
      92           0 :     AVFilterLink *outlink = ctx->outputs[0];
      93             :     AVFrame *out, *base, *dark, *bright;
      94             :     int ret;
      95             : 
      96           0 :     if ((ret = ff_framesync_get_frame(&s->fs, 0, &base,   0)) < 0 ||
      97           0 :         (ret = ff_framesync_get_frame(&s->fs, 1, &dark,   0)) < 0 ||
      98           0 :         (ret = ff_framesync_get_frame(&s->fs, 2, &bright, 0)) < 0)
      99           0 :         return ret;
     100             : 
     101           0 :     if (ctx->is_disabled) {
     102           0 :         out = av_frame_clone(base);
     103           0 :         if (!out)
     104           0 :             return AVERROR(ENOMEM);
     105             :     } else {
     106             :         ThreadData td;
     107             : 
     108           0 :         out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
     109           0 :         if (!out)
     110           0 :             return AVERROR(ENOMEM);
     111           0 :         av_frame_copy_props(out, base);
     112             : 
     113           0 :         td.b = base;
     114           0 :         td.o = dark;
     115           0 :         td.m = bright;
     116           0 :         td.d = out;
     117             : 
     118           0 :         ctx->internal->execute(ctx, s->maskedclamp, &td, NULL, FFMIN(s->height[0],
     119             :                                                                      ff_filter_get_nb_threads(ctx)));
     120             :     }
     121           0 :     out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);
     122             : 
     123           0 :     return ff_filter_frame(outlink, out);
     124             : }
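/* The job count handed to execute() is FFMIN(s->height[0], nb_threads), so
 * each worker invocation processes one contiguous band of rows.  Rough
 * arithmetic, assuming a 1080-row luma plane and 4 threads: job 0 covers
 * rows [0, 270), job 1 [270, 540), job 2 [540, 810) and job 3 [810, 1080),
 * following slice_start = h * jobnr / nb_jobs and
 * slice_end = h * (jobnr + 1) / nb_jobs in the workers below. */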
     125             : 
     126           0 : static int maskedclamp8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
     127             : {
     128           0 :     MaskedClampContext *s = ctx->priv;
     129           0 :     ThreadData *td = arg;
     130             :     int p;
     131             : 
     132           0 :     for (p = 0; p < s->nb_planes; p++) {
     133           0 :         const ptrdiff_t blinesize = td->b->linesize[p];
     134           0 :         const ptrdiff_t brightlinesize = td->m->linesize[p];
     135           0 :         const ptrdiff_t darklinesize = td->o->linesize[p];
     136           0 :         const ptrdiff_t dlinesize = td->d->linesize[p];
     137           0 :         const int w = s->width[p];
     138           0 :         const int h = s->height[p];
     139           0 :         const int slice_start = (h * jobnr) / nb_jobs;
     140           0 :         const int slice_end = (h * (jobnr+1)) / nb_jobs;
     141           0 :         const uint8_t *bsrc = td->b->data[p] + slice_start * blinesize;
     142           0 :         const uint8_t *darksrc = td->o->data[p] + slice_start * darklinesize;
     143           0 :         const uint8_t *brightsrc = td->m->data[p] + slice_start * brightlinesize;
     144           0 :         uint8_t *dst = td->d->data[p] + slice_start * dlinesize;
     145           0 :         const int undershoot = s->undershoot;
     146           0 :         const int overshoot = s->overshoot;
     147             :         int x, y;
     148             : 
     149           0 :         if (!((1 << p) & s->planes)) {
     150           0 :             av_image_copy_plane(dst, dlinesize, bsrc, blinesize,
     151             :                                 s->linesize[p], slice_end - slice_start);
     152           0 :             continue;
     153             :         }
     154             : 
     155           0 :         for (y = slice_start; y < slice_end; y++) {
     156           0 :             for (x = 0; x < w; x++) {
     157           0 :                 if (bsrc[x] < darksrc[x] - undershoot)
     158           0 :                     dst[x] = darksrc[x] - undershoot;
     159           0 :                 else if (bsrc[x] > brightsrc[x] + overshoot)
     160           0 :                     dst[x] = brightsrc[x] + overshoot;
     161             :                 else
     162           0 :                     dst[x] = bsrc[x];
     163             :             }
     164             : 
     165           0 :             dst  += dlinesize;
     166           0 :             bsrc += blinesize;
     167           0 :             darksrc += darklinesize;
     168           0 :             brightsrc += brightlinesize;
     169             :         }
     170             :     }
     171             : 
     172           0 :     return 0;
     173             : }
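/* In this 8-bit path the operands of darksrc[x] - undershoot and
 * brightsrc[x] + overshoot are promoted to int, so the comparisons remain
 * correct even when undershoot or overshoot exceeds the 8-bit pixel range.
 * Worked example, assuming undershoot = 10: a base pixel of 30 against a
 * dark pixel of 50 is below 50 - 10 = 40, so the output pixel becomes 40. */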
     174             : 
     175           0 : static int maskedclamp16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
     176             : {
     177           0 :     MaskedClampContext *s = ctx->priv;
     178           0 :     ThreadData *td = arg;
     179             :     int p;
     180             : 
     181           0 :     for (p = 0; p < s->nb_planes; p++) {
     182           0 :         const ptrdiff_t blinesize = td->b->linesize[p] / 2;
     183           0 :         const ptrdiff_t brightlinesize = td->m->linesize[p] / 2;
     184           0 :         const ptrdiff_t darklinesize = td->o->linesize[p] / 2;
     185           0 :         const ptrdiff_t dlinesize = td->d->linesize[p] / 2;
     186           0 :         const int w = s->width[p];
     187           0 :         const int h = s->height[p];
     188           0 :         const int slice_start = (h * jobnr) / nb_jobs;
     189           0 :         const int slice_end = (h * (jobnr+1)) / nb_jobs;
     190           0 :         const uint16_t *bsrc = (const uint16_t *)td->b->data[p] + slice_start * blinesize;
     191           0 :         const uint16_t *darksrc = (const uint16_t *)td->o->data[p] + slice_start * darklinesize;
     192           0 :         const uint16_t *brightsrc = (const uint16_t *)td->m->data[p] + slice_start * brightlinesize;
     193           0 :         uint16_t *dst = (uint16_t *)td->d->data[p] + slice_start * dlinesize;
     194           0 :         const int undershoot = s->undershoot;
     195           0 :         const int overshoot = s->overshoot;
     196             :         int x, y;
     197             : 
     198           0 :         if (!((1 << p) & s->planes)) {
      199           0 :             av_image_copy_plane((uint8_t *)dst, td->d->linesize[p], (const uint8_t *)bsrc,
      200             :                                 td->b->linesize[p], s->linesize[p], slice_end - slice_start);
     201           0 :             continue;
     202             :         }
     203             : 
     204           0 :         for (y = slice_start; y < slice_end; y++) {
     205           0 :             for (x = 0; x < w; x++) {
     206           0 :                 if (bsrc[x] < darksrc[x] - undershoot)
     207           0 :                     dst[x] = darksrc[x] - undershoot;
     208           0 :                 else if (bsrc[x] > brightsrc[x] + overshoot)
     209           0 :                     dst[x] = brightsrc[x] + overshoot;
     210             :                 else
     211           0 :                     dst[x] = bsrc[x];
     212             :             }
     213             : 
     214           0 :             dst  += dlinesize;
     215           0 :             bsrc += blinesize;
     216           0 :             darksrc += darklinesize;
     217           0 :             brightsrc += brightlinesize;
     218             :         }
     219             :     }
     220             : 
     221           0 :     return 0;
     222             : }
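/* AVFrame::linesize[] is expressed in bytes, so this 16-bit path halves it to
 * get a uint16_t element stride for the pointer arithmetic, while the
 * av_image_copy_plane() call for unprocessed planes keeps the original byte
 * strides.  The clamp logic itself is identical to the 8-bit version above. */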
     223             : 
     224           0 : static int config_input(AVFilterLink *inlink)
     225             : {
     226           0 :     AVFilterContext *ctx = inlink->dst;
     227           0 :     MaskedClampContext *s = ctx->priv;
     228           0 :     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
     229             :     int vsub, hsub, ret;
     230             : 
     231           0 :     s->nb_planes = av_pix_fmt_count_planes(inlink->format);
     232             : 
     233           0 :     if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
     234           0 :         return ret;
     235             : 
     236           0 :     hsub = desc->log2_chroma_w;
     237           0 :     vsub = desc->log2_chroma_h;
     238           0 :     s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
     239           0 :     s->height[0] = s->height[3] = inlink->h;
     240           0 :     s->width[1]  = s->width[2]  = AV_CEIL_RSHIFT(inlink->w, hsub);
     241           0 :     s->width[0]  = s->width[3]  = inlink->w;
     242             : 
     243           0 :     s->depth = desc->comp[0].depth;
     244             : 
     245           0 :     if (desc->comp[0].depth == 8)
     246           0 :         s->maskedclamp = maskedclamp8;
     247             :     else
     248           0 :         s->maskedclamp = maskedclamp16;
     249             : 
     250           0 :     return 0;
     251             : }
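/* AV_CEIL_RSHIFT rounds the shifted dimension up, so odd sizes keep full
 * chroma coverage.  Example, assuming a 1919x1079 yuv420p input
 * (log2_chroma_w = log2_chroma_h = 1): the chroma planes come out as
 * AV_CEIL_RSHIFT(1919, 1) x AV_CEIL_RSHIFT(1079, 1) = 960 x 540. */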
     252             : 
     253           0 : static int config_output(AVFilterLink *outlink)
     254             : {
     255           0 :     AVFilterContext *ctx = outlink->src;
     256           0 :     MaskedClampContext *s = ctx->priv;
     257           0 :     AVFilterLink *base = ctx->inputs[0];
     258           0 :     AVFilterLink *dark = ctx->inputs[1];
     259           0 :     AVFilterLink *bright = ctx->inputs[2];
     260             :     FFFrameSyncIn *in;
     261             :     int ret;
     262             : 
     263           0 :     if (base->format != dark->format ||
     264           0 :         base->format != bright->format) {
     265           0 :         av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
     266           0 :         return AVERROR(EINVAL);
     267             :     }
     268           0 :     if (base->w != dark->w   || base->h != dark->h ||
     269           0 :         base->w != bright->w || base->h != bright->h) {
     270           0 :         av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
     271             :                "(size %dx%d) do not match the corresponding "
     272             :                "second input link %s parameters (%dx%d) "
     273             :                "and/or third input link %s parameters (size %dx%d)\n",
     274           0 :                ctx->input_pads[0].name, base->w, base->h,
     275           0 :                ctx->input_pads[1].name, dark->w, dark->h,
     276           0 :                ctx->input_pads[2].name, bright->w, bright->h);
     277           0 :         return AVERROR(EINVAL);
     278             :     }
     279             : 
     280           0 :     outlink->w = base->w;
     281           0 :     outlink->h = base->h;
     282           0 :     outlink->time_base = base->time_base;
     283           0 :     outlink->sample_aspect_ratio = base->sample_aspect_ratio;
     284           0 :     outlink->frame_rate = base->frame_rate;
     285             : 
     286           0 :     if ((ret = ff_framesync_init(&s->fs, ctx, 3)) < 0)
     287           0 :         return ret;
     288             : 
     289           0 :     in = s->fs.in;
     290           0 :     in[0].time_base = base->time_base;
     291           0 :     in[1].time_base = dark->time_base;
     292           0 :     in[2].time_base = bright->time_base;
     293           0 :     in[0].sync   = 1;
     294           0 :     in[0].before = EXT_STOP;
     295           0 :     in[0].after  = EXT_INFINITY;
     296           0 :     in[1].sync   = 1;
     297           0 :     in[1].before = EXT_STOP;
     298           0 :     in[1].after  = EXT_INFINITY;
     299           0 :     in[2].sync   = 1;
     300           0 :     in[2].before = EXT_STOP;
     301           0 :     in[2].after  = EXT_INFINITY;
     302           0 :     s->fs.opaque   = s;
     303           0 :     s->fs.on_event = process_frame;
     304             : 
     305           0 :     return ff_framesync_configure(&s->fs);
     306             : }
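/* All three inputs are hard-synced (sync = 1).  before = EXT_STOP keeps the
 * filter from emitting output until every input has delivered its first
 * frame, and after = EXT_INFINITY repeats an input's last frame once that
 * input ends, so a shorter dark or bright stream does not cut the output
 * short. */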
     307             : 
     308           0 : static int activate(AVFilterContext *ctx)
     309             : {
     310           0 :     MaskedClampContext *s = ctx->priv;
     311           0 :     return ff_framesync_activate(&s->fs);
     312             : }
     313             : 
     314           0 : static av_cold void uninit(AVFilterContext *ctx)
     315             : {
     316           0 :     MaskedClampContext *s = ctx->priv;
     317             : 
     318           0 :     ff_framesync_uninit(&s->fs);
     319           0 : }
     320             : 
     321             : static const AVFilterPad maskedclamp_inputs[] = {
     322             :     {
     323             :         .name         = "base",
     324             :         .type         = AVMEDIA_TYPE_VIDEO,
     325             :         .config_props = config_input,
     326             :     },
     327             :     {
     328             :         .name         = "dark",
     329             :         .type         = AVMEDIA_TYPE_VIDEO,
     330             :     },
     331             :     {
     332             :         .name         = "bright",
     333             :         .type         = AVMEDIA_TYPE_VIDEO,
     334             :     },
     335             :     { NULL }
     336             : };
     337             : 
     338             : static const AVFilterPad maskedclamp_outputs[] = {
     339             :     {
     340             :         .name          = "default",
     341             :         .type          = AVMEDIA_TYPE_VIDEO,
     342             :         .config_props  = config_output,
     343             :     },
     344             :     { NULL }
     345             : };
     346             : 
     347             : AVFilter ff_vf_maskedclamp = {
     348             :     .name          = "maskedclamp",
     349             :     .description   = NULL_IF_CONFIG_SMALL("Clamp first stream with second stream and third stream."),
     350             :     .priv_size     = sizeof(MaskedClampContext),
     351             :     .uninit        = uninit,
     352             :     .activate      = activate,
     353             :     .query_formats = query_formats,
     354             :     .inputs        = maskedclamp_inputs,
     355             :     .outputs       = maskedclamp_outputs,
     356             :     .priv_class    = &maskedclamp_class,
     357             :     .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
     358             : };
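/* Typical use from the command line, as an illustrative example with
 * hypothetical file names (not taken from this file):
 *
 *     ffmpeg -i base.mp4 -i dark.mp4 -i bright.mp4 \
 *            -lavfi "[0:v][1:v][2:v]maskedclamp=undershoot=2:overshoot=2" out.mp4
 *
 * Each selected plane of the base stream is clamped per pixel into
 * [dark - undershoot, bright + overshoot]. */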

Generated by: LCOV version 1.13