/*
 * Copyright (c) 2015 Himangi Saraogi <himangi774@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file detelecine filter.
 */
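
/*
 * The filter reverses a telecine ("pulldown") pattern: each digit of the
 * pattern string tells for how many fields the corresponding original frame
 * was displayed when it was telecined.  With the default pattern "23" (the
 * classic 3:2 pulldown that turns 24 fps film into 30000/1001 fps video),
 * every five telecined input frames collapse back into four progressive
 * output frames.
 */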

#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "video.h"

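/*
 * Filter state: pattern_pos and nskip_fields track progress through the
 * pulldown pattern, temp buffers an input picture whose second field is
 * still needed (occupied marks it valid), and pts holds the ratio of input
 * to output frames per pattern cycle, from which the output frame rate,
 * time base and per-frame pts step (ts_unit) are derived.
 */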
typedef struct DetelecineContext {
    const AVClass *class;
    int first_field;
    char *pattern;
    int start_frame;
    int init_len;
    unsigned int pattern_pos;
    unsigned int nskip_fields;
    int64_t start_time;

    AVRational pts;
    AVRational ts_unit;
    int occupied;

    int nb_planes;
    int planeheight[4];
    int stride[4];

    AVFrame *frame[2];
    AVFrame *temp;
} DetelecineContext;

#define OFFSET(x) offsetof(DetelecineContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption detelecine_options[] = {
    {"first_field", "select first field",        OFFSET(first_field), AV_OPT_TYPE_INT,   {.i64=0}, 0, 1, FLAGS, .unit = "field"},
    {"top",         "select top field first",    0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, .unit = "field"},
    {"t",           "select top field first",    0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, .unit = "field"},
    {"bottom",      "select bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, .unit = "field"},
    {"b",           "select bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, .unit = "field"},
| 66 | {"pattern", "pattern that describe for how many fields a frame is to be displayed", OFFSET(pattern), AV_OPT_TYPE_STRING, {.str="23"}, 0, 0, FLAGS}, | ||
| 67 | {"start_frame", "position of first frame with respect to the pattern if stream is cut", OFFSET(start_frame), AV_OPT_TYPE_INT, {.i64=0}, 0, 13, FLAGS}, | ||
| 68 | {NULL} | ||
| 69 | }; | ||
| 70 | |||
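/*
 * Typical use pairs this filter with the telecine filter's defaults, e.g.
 * (illustrative file names):
 *
 *     ffmpeg -i telecined.mkv -vf detelecine=first_field=top:pattern=23 progressive.mkv
 */
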
AVFILTER_DEFINE_CLASS(detelecine);

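/*
 * init() validates the pattern, accumulates the pts ratio (sum of the
 * pattern digits over twice the pattern length) and, when start_frame is
 * set, advances pattern_pos to the matching phase and records in init_len
 * how many fields of that pattern entry are still pending.
 */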
static av_cold int init(AVFilterContext *ctx)
{
    DetelecineContext *s = ctx->priv;
    const char *p;
    int max = 0;
    int sum = 0;

    if (!strlen(s->pattern)) {
        av_log(ctx, AV_LOG_ERROR, "No pattern provided.\n");
        return AVERROR_INVALIDDATA;
    }

    for (p = s->pattern; *p; p++) {
        if (!av_isdigit(*p)) {
            av_log(ctx, AV_LOG_ERROR, "Provided pattern includes non-numeric characters.\n");
            return AVERROR_INVALIDDATA;
        }

        sum += *p - '0';
        max = FFMAX(*p - '0', max);
        s->pts.num += *p - '0';
        s->pts.den += 2;
    }

    if (s->start_frame >= sum) {
        av_log(ctx, AV_LOG_ERROR, "Provided start_frame is too big.\n");
        return AVERROR_INVALIDDATA;
    }

    s->nskip_fields = 0;
    s->pattern_pos = 0;
    s->start_time = AV_NOPTS_VALUE;
    s->init_len = 0;

    if (s->start_frame != 0) {
        int nfields = 0;
        for (p = s->pattern; *p; p++) {
            nfields += *p - '0';
            s->pattern_pos++;
            if (nfields >= 2*s->start_frame) {
                s->init_len = nfields - 2*s->start_frame;
                break;
            }
        }
    }

    av_log(ctx, AV_LOG_INFO, "Detelecine pattern %s removes up to %d frames per frame, pts advance factor: %d/%d\n",
           s->pattern, (max + 1) / 2, s->pts.num, s->pts.den);

    return 0;
}

static int query_formats(const AVFilterContext *ctx,
                         AVFilterFormatsConfig **cfg_in,
                         AVFilterFormatsConfig **cfg_out)
{
    int reject_flags = AV_PIX_FMT_FLAG_BITSTREAM |
                       AV_PIX_FMT_FLAG_PAL       |
                       AV_PIX_FMT_FLAG_HWACCEL;

    return ff_set_common_formats2(ctx, cfg_in, cfg_out,
                                  ff_formats_pixdesc_filter(0, reject_flags));
}

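/*
 * config_input() allocates the scratch picture and the two output pictures
 * and caches per-plane line sizes and heights for the field copies below.
 */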
static int config_input(AVFilterLink *inlink)
{
    DetelecineContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int ret;

    s->temp = ff_get_video_buffer(inlink, inlink->w, inlink->h);
    if (!s->temp)
        return AVERROR(ENOMEM);

    s->frame[0] = ff_get_video_buffer(inlink, inlink->w, inlink->h);
    if (!s->frame[0])
        return AVERROR(ENOMEM);

    s->frame[1] = ff_get_video_buffer(inlink, inlink->w, inlink->h);
    if (!s->frame[1])
        return AVERROR(ENOMEM);

    if ((ret = av_image_fill_linesizes(s->stride, inlink->format, inlink->w)) < 0)
        return ret;

    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    return 0;
}

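/*
 * config_output() scales the frame rate by the inverse of the pts ratio
 * (e.g. 30000/1001 * 4/5 = 24000/1001 for the default "23" pattern),
 * widens the time base by the same factor and derives ts_unit, the pts
 * increment of one output frame in the new time base.
 */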
static int config_output(AVFilterLink *outlink)
{
    FilterLink *outl = ff_filter_link(outlink);
    AVFilterContext *ctx = outlink->src;
    DetelecineContext *s = ctx->priv;
    const AVFilterLink *inlink = ctx->inputs[0];
    const FilterLink *inl = ff_filter_link(ctx->inputs[0]);
    AVRational fps = inl->frame_rate;

    if (!fps.num || !fps.den) {
        av_log(ctx, AV_LOG_ERROR, "The input needs a constant frame rate; "
               "current rate of %d/%d is invalid\n", fps.num, fps.den);
        return AVERROR(EINVAL);
    }
    fps = av_mul_q(fps, av_inv_q(s->pts));
    av_log(ctx, AV_LOG_VERBOSE, "FPS: %d/%d -> %d/%d\n",
           inl->frame_rate.num, inl->frame_rate.den, fps.num, fps.den);

    outl->frame_rate = fps;
    outlink->time_base = av_mul_q(inlink->time_base, s->pts);
    av_log(ctx, AV_LOG_VERBOSE, "TB: %d/%d -> %d/%d\n",
           inlink->time_base.num, inlink->time_base.den, outlink->time_base.num, outlink->time_base.den);

    s->ts_unit = av_inv_q(av_mul_q(fps, outlink->time_base));

    return 0;
}

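/*
 * filter_frame() consumes one telecined input picture and emits zero, one
 * or two progressive pictures: nskip_fields counts input fields already
 * accounted for by previous output, while temp/occupied hold a picture
 * whose remaining field may still have to be woven together with a field
 * of the next input.
 */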
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    FilterLink *outl = ff_filter_link(outlink);
    DetelecineContext *s = ctx->priv;
    int i, len = 0, ret = 0, out = 0;

    if (s->start_time == AV_NOPTS_VALUE)
        s->start_time = inpicref->pts;

    if (s->nskip_fields >= 2) {
        s->nskip_fields -= 2;
        av_frame_free(&inpicref);
        return 0;
    } else if (s->nskip_fields >= 1) {
        for (i = 0; i < s->nb_planes; i++) {
            av_image_copy_plane(s->temp->data[i], s->temp->linesize[i],
                                inpicref->data[i], inpicref->linesize[i],
                                s->stride[i],
                                s->planeheight[i]);
        }
        s->occupied = 1;
        s->nskip_fields--;
        av_frame_free(&inpicref);
        return 0;
    }

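    /* Both fields of this picture are still needed: fetch the next nonzero
     * pattern entry (or the leftover init_len after a mid-pattern start)
     * and decide how the output frame(s) are assembled. */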
    if (s->nskip_fields == 0) {
        len = s->init_len;
        s->init_len = 0;
        while (!len && s->pattern[s->pattern_pos]) {
            len = s->pattern[s->pattern_pos] - '0';
            s->pattern_pos++;
        }

        if (!s->pattern[s->pattern_pos])
            s->pattern_pos = 0;

        if (!len) { // do not output any field as the entire pattern is zero
            av_frame_free(&inpicref);
            return 0;
        }

        if (len == 1 && s->occupied) {
            s->occupied = 0;
            // output the buffered image as-is
            for (i = 0; i < s->nb_planes; i++)
                av_image_copy_plane(s->frame[out]->data[i], s->frame[out]->linesize[i],
                                    s->temp->data[i], s->temp->linesize[i],
                                    s->stride[i],
                                    s->planeheight[i]);
            len = 0;
            while (!len && s->pattern[s->pattern_pos]) {
                len = s->pattern[s->pattern_pos] - '0';
                s->pattern_pos++;
            }

            if (!s->pattern[s->pattern_pos])
                s->pattern_pos = 0;

            s->occupied = 0;
            ++out;
        }

        if (s->occupied) {
            for (i = 0; i < s->nb_planes; i++) {
                // fill in the EARLIER field from the new pic
                av_image_copy_plane(s->frame[out]->data[i] + s->frame[out]->linesize[i] * s->first_field,
                                    s->frame[out]->linesize[i] * 2,
                                    inpicref->data[i] + inpicref->linesize[i] * s->first_field,
                                    inpicref->linesize[i] * 2,
                                    s->stride[i],
                                    (s->planeheight[i] - s->first_field + 1) / 2);
                // fill in the LATER field from the buffered pic
                av_image_copy_plane(s->frame[out]->data[i] + s->frame[out]->linesize[i] * !s->first_field,
                                    s->frame[out]->linesize[i] * 2,
                                    s->temp->data[i] + s->temp->linesize[i] * !s->first_field,
                                    s->temp->linesize[i] * 2,
                                    s->stride[i],
                                    (s->planeheight[i] - !s->first_field + 1) / 2);
            }

            s->occupied = 0;
            if (len <= 2) {
                for (i = 0; i < s->nb_planes; i++) {
                    av_image_copy_plane(s->temp->data[i], s->temp->linesize[i],
                                        inpicref->data[i], inpicref->linesize[i],
                                        s->stride[i],
                                        s->planeheight[i]);
                }
                s->occupied = 1;
            }
            ++out;
            len = (len >= 3) ? len - 3 : 0;
        } else {
            if (len >= 2) {
                // output THIS image as-is
                for (i = 0; i < s->nb_planes; i++)
                    av_image_copy_plane(s->frame[out]->data[i], s->frame[out]->linesize[i],
                                        inpicref->data[i], inpicref->linesize[i],
                                        s->stride[i],
                                        s->planeheight[i]);
                len -= 2;
                ++out;
            } else if (len == 1) {
                // output THIS image as-is
                for (i = 0; i < s->nb_planes; i++)
                    av_image_copy_plane(s->frame[out]->data[i], s->frame[out]->linesize[i],
                                        inpicref->data[i], inpicref->linesize[i],
                                        s->stride[i],
                                        s->planeheight[i]);

                for (i = 0; i < s->nb_planes; i++) {
                    av_image_copy_plane(s->temp->data[i], s->temp->linesize[i],
                                        inpicref->data[i], inpicref->linesize[i],
                                        s->stride[i],
                                        s->planeheight[i]);
                }
                s->occupied = 1;

                len--;
                ++out;
            }
        }

        if (len == 1 && s->occupied) {
            len--;
            s->occupied = 0;
        }
    }
    s->nskip_fields = len;

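    /* Hand the assembled frame(s) downstream; each pts is the stream start
     * time plus the output frame index scaled by ts_unit. */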
    for (i = 0; i < out; ++i) {
        AVFrame *frame = av_frame_clone(s->frame[i]);

        if (!frame) {
            av_frame_free(&inpicref);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(frame, inpicref);
        frame->pts = ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time) +
                     av_rescale(outl->frame_count_in, s->ts_unit.num,
                                s->ts_unit.den);
        ret = ff_filter_frame(outlink, frame);
    }

    av_frame_free(&inpicref);

    return ret;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    DetelecineContext *s = ctx->priv;

    av_frame_free(&s->temp);
    av_frame_free(&s->frame[0]);
    av_frame_free(&s->frame[1]);
}

static const AVFilterPad detelecine_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
};

static const AVFilterPad detelecine_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
};

const FFFilter ff_vf_detelecine = {
    .p.name        = "detelecine",
    .p.description = NULL_IF_CONFIG_SMALL("Apply an inverse telecine pattern."),
    .p.priv_class  = &detelecine_class,
    .priv_size     = sizeof(DetelecineContext),
    .init          = init,
    .uninit        = uninit,
    FILTER_INPUTS(detelecine_inputs),
    FILTER_OUTPUTS(detelecine_outputs),
    FILTER_QUERY_FUNC2(query_formats),
};