LCOV - code coverage report
Current view: top level - libavfilter - vf_lut2.c (source / functions) Hit Total Coverage
Test: coverage.info Lines: 0 167 0.0 %
Date: 2017-12-18 06:23:41 Functions: 0 15 0.0 %

          Line data    Source code
       1             : /*
       2             :  * Copyright (c) 2016 Paul B Mahol
       3             :  *
       4             :  * This file is part of FFmpeg.
       5             :  *
       6             :  * FFmpeg is free software; you can redistribute it and/or
       7             :  * modify it under the terms of the GNU Lesser General Public
       8             :  * License as published by the Free Software Foundation; either
       9             :  * version 2.1 of the License, or (at your option) any later version.
      10             :  *
      11             :  * FFmpeg is distributed in the hope that it will be useful,
      12             :  * but WITHOUT ANY WARRANTY; without even the implied warranty of
      13             :  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
      14             :  * Lesser General Public License for more details.
      15             :  *
      16             :  * You should have received a copy of the GNU Lesser General Public
      17             :  * License along with FFmpeg; if not, write to the Free Software
      18             :  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
      19             :  */
      20             : 
      21             : #include "libavutil/attributes.h"
      22             : #include "libavutil/common.h"
      23             : #include "libavutil/eval.h"
      24             : #include "libavutil/opt.h"
      25             : #include "libavutil/pixdesc.h"
      26             : #include "avfilter.h"
      27             : #include "drawutils.h"
      28             : #include "formats.h"
      29             : #include "internal.h"
      30             : #include "video.h"
      31             : #include "framesync.h"
      32             : 
/* Names of the variables usable in the per-component expressions; indexed by
 * the var_name enum below and NULL-terminated for av_expr_parse(). */
static const char *const var_names[] = {
    "w",        ///< width of the input video
    "h",        ///< height of the input video
    "x",        ///< input value for the pixel from input #1
    "y",        ///< input value for the pixel from input #2
    "bdx",      ///< input #1 video bitdepth
    "bdy",      ///< input #2 video bitdepth
    NULL
};
      42             : 
/* Indices into LUT2Context.var_values; must stay in sync with var_names. */
enum var_name {
    VAR_W,          ///< "w"   — input width
    VAR_H,          ///< "h"   — input height
    VAR_X,          ///< "x"   — sample value from input #1
    VAR_Y,          ///< "y"   — sample value from input #2
    VAR_BITDEPTHX,  ///< "bdx" — bitdepth of input #1
    VAR_BITDEPTHY,  ///< "bdy" — bitdepth of input #2
    VAR_VARS_NB     ///< number of variables (array size)
};
      52             : 
/* Private context shared by the "lut2" (two inputs) and "tlut2" (one input,
 * current + previous frame) filters. */
typedef struct LUT2Context {
    const AVClass *class;
    FFFrameSync fs;             ///< synchronizes the two inputs (lut2 only)

    char   *comp_expr_str[4];   ///< expression strings, one per component (options c0..c3)

    AVExpr *comp_expr[4];       ///< parsed expressions, one per component
    double var_values[VAR_VARS_NB]; ///< current values for the expression variables
    uint16_t *lut[4];  ///< lookup table for each component
    int width[4], height[4];    ///< per-plane dimensions of the output
    int nb_planes;              ///< number of planes in the pixel format
    int depth, depthx, depthy;  ///< depth = depthx + depthy = LUT index width in bits
    int tlut2;                  ///< nonzero when running as the "tlut2" filter
    AVFrame *prev_frame;        /* only used with tlut2 */

    /* applies the LUTs; picked in config_output() based on sample depth */
    void (*lut2)(struct LUT2Context *s, AVFrame *dst, AVFrame *srcx, AVFrame *srcy);

} LUT2Context;
      71             : 
#define OFFSET(x) offsetof(LUT2Context, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* Options shared by lut2 and tlut2: one expression per component, each
 * defaulting to "x" (pass the first input through unchanged). */
static const AVOption options[] = {
    { "c0", "set component #0 expression", OFFSET(comp_expr_str[0]),  AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c1", "set component #1 expression", OFFSET(comp_expr_str[1]),  AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c2", "set component #2 expression", OFFSET(comp_expr_str[2]),  AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c3", "set component #3 expression", OFFSET(comp_expr_str[3]),  AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { NULL }
};
      82             : 
      83           0 : static av_cold void uninit(AVFilterContext *ctx)
      84             : {
      85           0 :     LUT2Context *s = ctx->priv;
      86             :     int i;
      87             : 
      88           0 :     ff_framesync_uninit(&s->fs);
      89           0 :     av_frame_free(&s->prev_frame);
      90             : 
      91           0 :     for (i = 0; i < 4; i++) {
      92           0 :         av_expr_free(s->comp_expr[i]);
      93           0 :         s->comp_expr[i] = NULL;
      94           0 :         av_freep(&s->comp_expr_str[i]);
      95           0 :         av_freep(&s->lut[i]);
      96             :     }
      97           0 : }
      98             : 
/* Advertise the supported pixel formats: planar YUV/YUVA, GBR(A) and gray,
 * 8 to 12 bits per sample.  The depth cap matters because the LUT is indexed
 * by depthx + depthy bits (up to 2^24 entries at 12+12 bits). */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
        AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRP12,
        AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12,
        AV_PIX_FMT_NONE
    };

    return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
}
     121             : 
     122           0 : static int config_inputx(AVFilterLink *inlink)
     123             : {
     124           0 :     AVFilterContext *ctx = inlink->dst;
     125           0 :     LUT2Context *s = ctx->priv;
     126           0 :     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
     127           0 :     int hsub = desc->log2_chroma_w;
     128           0 :     int vsub = desc->log2_chroma_h;
     129             : 
     130           0 :     s->nb_planes = av_pix_fmt_count_planes(inlink->format);
     131           0 :     s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
     132           0 :     s->height[0] = s->height[3] = inlink->h;
     133           0 :     s->width[1]  = s->width[2]  = AV_CEIL_RSHIFT(inlink->w, hsub);
     134           0 :     s->width[0]  = s->width[3]  = inlink->w;
     135             : 
     136           0 :     s->var_values[VAR_W] = inlink->w;
     137           0 :     s->var_values[VAR_H] = inlink->h;
     138           0 :     s->depthx = desc->comp[0].depth;
     139           0 :     s->var_values[VAR_BITDEPTHX] = s->depthx;
     140             : 
     141           0 :     if (s->tlut2) {
     142           0 :         s->depthy = desc->comp[0].depth;
     143           0 :         s->var_values[VAR_BITDEPTHY] = s->depthy;
     144             :     }
     145             : 
     146           0 :     return 0;
     147             : }
     148             : 
     149           0 : static int config_inputy(AVFilterLink *inlink)
     150             : {
     151           0 :     AVFilterContext *ctx = inlink->dst;
     152           0 :     LUT2Context *s = ctx->priv;
     153           0 :     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
     154             : 
     155           0 :     s->depthy = desc->comp[0].depth;
     156           0 :     s->var_values[VAR_BITDEPTHY] = s->depthy;
     157             : 
     158           0 :     return 0;
     159             : }
     160             : 
     161           0 : static void lut2_8bit(struct LUT2Context *s, AVFrame *out, AVFrame *srcx, AVFrame *srcy)
     162             : {
     163             :     int p, y, x;
     164             : 
     165           0 :     for (p = 0; p < s->nb_planes; p++) {
     166           0 :         const uint16_t *lut = s->lut[p];
     167             :         const uint8_t *srcxx, *srcyy;
     168             :         uint8_t *dst;
     169             : 
     170           0 :         dst   = out->data[p];
     171           0 :         srcxx = srcx->data[p];
     172           0 :         srcyy = srcy->data[p];
     173             : 
     174           0 :         for (y = 0; y < s->height[p]; y++) {
     175           0 :             for (x = 0; x < s->width[p]; x++) {
     176           0 :                 dst[x] = lut[(srcyy[x] << s->depthx) | srcxx[x]];
     177             :             }
     178             : 
     179           0 :             dst   += out->linesize[p];
     180           0 :             srcxx += srcx->linesize[p];
     181           0 :             srcyy += srcy->linesize[p];
     182             :         }
     183             :     }
     184           0 : }
     185             : 
     186           0 : static void lut2_16bit(struct LUT2Context *s, AVFrame *out, AVFrame *srcx, AVFrame *srcy)
     187             : {
     188             :     int p, y, x;
     189             : 
     190           0 :     for (p = 0; p < s->nb_planes; p++) {
     191           0 :         const uint16_t *lut = s->lut[p];
     192             :         const uint16_t *srcxx, *srcyy;
     193             :         uint16_t *dst;
     194             : 
     195           0 :         dst   = (uint16_t *)out->data[p];
     196           0 :         srcxx = (uint16_t *)srcx->data[p];
     197           0 :         srcyy = (uint16_t *)srcy->data[p];
     198             : 
     199           0 :         for (y = 0; y < s->height[p]; y++) {
     200           0 :             for (x = 0; x < s->width[p]; x++) {
     201           0 :                 dst[x] = lut[(srcyy[x] << s->depthx) | srcxx[x]];
     202             :             }
     203             : 
     204           0 :             dst   += out->linesize[p]  / 2;
     205           0 :             srcxx += srcx->linesize[p] / 2;
     206           0 :             srcyy += srcy->linesize[p] / 2;
     207             :         }
     208             :     }
     209           0 : }
     210             : 
     211           0 : static int process_frame(FFFrameSync *fs)
     212             : {
     213           0 :     AVFilterContext *ctx = fs->parent;
     214           0 :     LUT2Context *s = fs->opaque;
     215           0 :     AVFilterLink *outlink = ctx->outputs[0];
     216           0 :     AVFrame *out, *srcx = NULL, *srcy = NULL;
     217             :     int ret;
     218             : 
     219           0 :     if ((ret = ff_framesync_get_frame(&s->fs, 0, &srcx, 0)) < 0 ||
     220           0 :         (ret = ff_framesync_get_frame(&s->fs, 1, &srcy, 0)) < 0)
     221           0 :         return ret;
     222             : 
     223           0 :     if (ctx->is_disabled || !srcy) {
     224           0 :         out = av_frame_clone(srcx);
     225           0 :         if (!out)
     226           0 :             return AVERROR(ENOMEM);
     227             :     } else {
     228           0 :         out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
     229           0 :         if (!out)
     230           0 :             return AVERROR(ENOMEM);
     231           0 :         av_frame_copy_props(out, srcx);
     232             : 
     233           0 :         s->lut2(s, out, srcx, srcy);
     234             :     }
     235             : 
     236           0 :     out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);
     237             : 
     238           0 :     return ff_filter_frame(outlink, out);
     239             : }
     240             : 
     241           0 : static int config_output(AVFilterLink *outlink)
     242             : {
     243           0 :     AVFilterContext *ctx = outlink->src;
     244           0 :     LUT2Context *s = ctx->priv;
     245             :     int p, ret;
     246             : 
     247           0 :     s->depth = s->depthx + s->depthy;
     248             : 
     249           0 :     s->lut2 = s->depth > 16 ? lut2_16bit : lut2_8bit;
     250             : 
     251           0 :     for (p = 0; p < s->nb_planes; p++) {
     252           0 :         s->lut[p] = av_malloc_array(1 << s->depth, sizeof(uint16_t));
     253           0 :         if (!s->lut[p])
     254           0 :             return AVERROR(ENOMEM);
     255             :     }
     256             : 
     257           0 :     for (p = 0; p < s->nb_planes; p++) {
     258             :         double res;
     259             :         int x, y;
     260             : 
     261             :         /* create the parsed expression */
     262           0 :         av_expr_free(s->comp_expr[p]);
     263           0 :         s->comp_expr[p] = NULL;
     264           0 :         ret = av_expr_parse(&s->comp_expr[p], s->comp_expr_str[p],
     265             :                             var_names, NULL, NULL, NULL, NULL, 0, ctx);
     266           0 :         if (ret < 0) {
     267           0 :             av_log(ctx, AV_LOG_ERROR,
     268             :                    "Error when parsing the expression '%s' for the component %d.\n",
     269             :                    s->comp_expr_str[p], p);
     270           0 :             return AVERROR(EINVAL);
     271             :         }
     272             : 
     273             :         /* compute the lut */
     274           0 :         for (y = 0; y < (1 << s->depthx); y++) {
     275           0 :             s->var_values[VAR_Y] = y;
     276           0 :             for (x = 0; x < (1 << s->depthx); x++) {
     277           0 :                 s->var_values[VAR_X] = x;
     278           0 :                 res = av_expr_eval(s->comp_expr[p], s->var_values, s);
     279           0 :                 if (isnan(res)) {
     280           0 :                     av_log(ctx, AV_LOG_ERROR,
     281             :                            "Error when evaluating the expression '%s' for the values %d and %d for the component %d.\n",
     282             :                            s->comp_expr_str[p], x, y, p);
     283           0 :                     return AVERROR(EINVAL);
     284             :                 }
     285             : 
     286           0 :                 s->lut[p][(y << s->depthx) + x] = res;
     287             :             }
     288             :         }
     289             :     }
     290             : 
     291           0 :     return 0;
     292             : }
     293             : 
/* Output configuration for the two-input lut2 filter: validate that both
 * inputs match, propagate link properties, set up frame synchronization,
 * and build the LUTs via config_output(). */
static int lut2_config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LUT2Context *s = ctx->priv;
    AVFilterLink *srcx = ctx->inputs[0];
    AVFilterLink *srcy = ctx->inputs[1];
    FFFrameSyncIn *in;
    int ret;

    /* both inputs must have the same pixel format and dimensions */
    if (srcx->format != srcy->format) {
        av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
        return AVERROR(EINVAL);
    }
    if (srcx->w != srcy->w || srcx->h != srcy->h) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
               "(size %dx%d) do not match the corresponding "
               "second input link %s parameters (size %dx%d)\n",
               ctx->input_pads[0].name, srcx->w, srcx->h,
               ctx->input_pads[1].name,
               srcy->w, srcy->h);
        return AVERROR(EINVAL);
    }

    /* the output inherits the first input's properties */
    outlink->w = srcx->w;
    outlink->h = srcx->h;
    outlink->time_base = srcx->time_base;
    outlink->sample_aspect_ratio = srcx->sample_aspect_ratio;
    outlink->frame_rate = srcx->frame_rate;

    if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
        return ret;

    /* input #1 drives the output (higher sync priority); both inputs stop
     * before their first frame and extend past their last one */
    in = s->fs.in;
    in[0].time_base = srcx->time_base;
    in[1].time_base = srcy->time_base;
    in[0].sync   = 2;
    in[0].before = EXT_STOP;
    in[0].after  = EXT_INFINITY;
    in[1].sync   = 1;
    in[1].before = EXT_STOP;
    in[1].after  = EXT_INFINITY;
    s->fs.opaque   = s;
    s->fs.on_event = process_frame;

    if ((ret = config_output(outlink)) < 0)
        return ret;

    return ff_framesync_configure(&s->fs);
}
     343             : 
/* Activation callback: hand scheduling over to the framesync machinery,
 * which calls process_frame() when a synchronized pair is available. */
static int activate(AVFilterContext *ctx)
{
    LUT2Context *s = ctx->priv;
    return ff_framesync_activate(&s->fs);
}
     349             : 
/* lut2 input pads: "srcx" provides the "x" samples, "srcy" the "y" samples. */
static const AVFilterPad inputs[] = {
    {
        .name         = "srcx",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_inputx,
    },
    {
        .name         = "srcy",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_inputy,
    },
    { NULL }
};
     363             : 
/* lut2 output pad; lut2_config_output validates inputs and builds the LUTs. */
static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = lut2_config_output,
    },
    { NULL }
};
     372             : 
/* lut2 uses the shared option table above. */
#define lut2_options options

/* Declares lut2_class and lut2_framesync_preinit (referenced below), merging
 * the FFFrameSync member "fs" options into the filter's AVClass. */
FRAMESYNC_DEFINE_CLASS(lut2, LUT2Context, fs);

AVFilter ff_vf_lut2 = {
    .name          = "lut2",
    .description   = NULL_IF_CONFIG_SMALL("Compute and apply a lookup table from two video inputs."),
    .preinit       = lut2_framesync_preinit,
    .priv_size     = sizeof(LUT2Context),
    .priv_class    = &lut2_class,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .inputs        = inputs,
    .outputs       = outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
     390             : 
     391             : #if CONFIG_TLUT2_FILTER
     392             : 
     393           0 : static av_cold int init(AVFilterContext *ctx)
     394             : {
     395           0 :     LUT2Context *s = ctx->priv;
     396             : 
     397           0 :     s->tlut2 = !strcmp(ctx->filter->name, "tlut2");
     398             : 
     399           0 :     return 0;
     400             : }
     401             : 
     402           0 : static int tlut2_filter_frame(AVFilterLink *inlink, AVFrame *frame)
     403             : {
     404           0 :     LUT2Context *s = inlink->dst->priv;
     405           0 :     AVFilterLink *outlink = inlink->dst->outputs[0];
     406             : 
     407           0 :     if (s->prev_frame) {
     408           0 :         AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
     409           0 :         if (!out) {
     410           0 :             av_frame_free(&s->prev_frame);
     411           0 :             s->prev_frame = frame;
     412           0 :             return AVERROR(ENOMEM);
     413             :         }
     414           0 :         av_frame_copy_props(out, frame);
     415           0 :         s->lut2(s, out, frame, s->prev_frame);
     416           0 :         av_frame_free(&s->prev_frame);
     417           0 :         s->prev_frame = frame;
     418           0 :         return ff_filter_frame(outlink, out);
     419             :     }
     420           0 :     s->prev_frame = frame;
     421           0 :     return 0;
     422             : }
     423             : 
/* tlut2 reuses the shared option table; no framesync options are needed
 * since it has a single input. */
#define tlut2_options options

AVFILTER_DEFINE_CLASS(tlut2);

/* Single input: config_inputx also fills in the "y" side (see s->tlut2). */
static const AVFilterPad tlut2_inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = tlut2_filter_frame,
        .config_props  = config_inputx,
    },
    { NULL }
};

/* Output builds the LUTs directly; no input cross-validation is required. */
static const AVFilterPad tlut2_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
    { NULL }
};
     446             : 
/* Temporal variant: same LUT machinery, but "y" comes from the previous
 * frame of the single input instead of a second input, so no framesync
 * preinit/activate callbacks are needed. */
AVFilter ff_vf_tlut2 = {
    .name          = "tlut2",
    .description   = NULL_IF_CONFIG_SMALL("Compute and apply a lookup table from two successive frames."),
    .priv_size     = sizeof(LUT2Context),
    .priv_class    = &tlut2_class,
    .query_formats = query_formats,
    .init          = init,
    .uninit        = uninit,
    .inputs        = tlut2_inputs,
    .outputs       = tlut2_outputs,
};
     458             : 
     459             : #endif

Generated by: LCOV version 1.13